Example #1
func WriteConfig(stateStore fi.StateStore, cluster *Cluster, groups []*InstanceGroup) error {
	// Check for instancegroup Name duplicates before writing
	{
		names := map[string]bool{}
		for i, ns := range groups {
			if ns.Name == "" {
				return fmt.Errorf("InstanceGroup #%d did not have a Name", i+1)
			}
			if names[ns.Name] {
				return fmt.Errorf("Duplicate InstanceGroup Name found: %q", ns.Name)
			}
			names[ns.Name] = true
		}
	}
	if cluster.CreationTimestamp.IsZero() {
		cluster.CreationTimestamp = unversioned.NewTime(time.Now().UTC())
	}
	err := stateStore.WriteConfig("config", cluster)
	if err != nil {
		return fmt.Errorf("error writing updated cluster configuration: %v", err)
	}

	for _, ns := range groups {
		if ns.CreationTimestamp.IsZero() {
			ns.CreationTimestamp = unversioned.NewTime(time.Now().UTC())
		}
		err = stateStore.WriteConfig("instancegroup/"+ns.Name, ns)
		if err != nil {
			return fmt.Errorf("error writing updated instancegroup configuration: %v", err)
		}
	}

	return nil
}
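All of these examples revolve around the same helper: wrapping a plain time.Time in the API's serializable Time type. As a minimal, self-contained sketch of that conversion (assuming the historical k8s.io/kubernetes/pkg/api/unversioned import path that the snippets above and below use):

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	// Wrap the current wall-clock time in the API's serializable Time type.
	created := unversioned.NewTime(time.Now().UTC())

	// unversioned.Now() is shorthand for unversioned.NewTime(time.Now()).
	now := unversioned.Now()

	// The embedded time.Time remains available for ordinary arithmetic.
	age := now.Time.Sub(created.Time)
	fmt.Printf("created=%v age=%v zero=%v\n", created, age, created.IsZero())
}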
Example #2
func TestStoreMultipleDataInput(t *testing.T) {
	fakeSink := NewFakeSink()
	timestamp := time.Now()

	now := time.Now()
	event1 := kube_api.Event{
		Message:        "event1",
		Count:          100,
		LastTimestamp:  kube_api_unversioned.NewTime(now),
		FirstTimestamp: kube_api_unversioned.NewTime(now),
	}

	event2 := kube_api.Event{
		Message:        "event2",
		Count:          101,
		LastTimestamp:  kube_api_unversioned.NewTime(now),
		FirstTimestamp: kube_api_unversioned.NewTime(now),
	}

	data := core.EventBatch{
		Timestamp: timestamp,
		Events: []*kube_api.Event{
			&event1,
			&event2,
		},
	}

	fakeSink.ExportEvents(&data)
	assert.Equal(t, 2, len(fakeSink.fakeDbClient.Pnts))
}
Example #3
// containerStatusToAPIContainerStatus converts a kubecontainer.ContainerStatus to an api.ContainerStatus.
func containerStatusToAPIContainerStatus(containerStatus *kubecontainer.ContainerStatus) *api.ContainerStatus {
	containerID := DockerPrefix + containerStatus.ID.ID
	status := api.ContainerStatus{
		Name:         containerStatus.Name,
		RestartCount: containerStatus.RestartCount,
		Image:        containerStatus.Image,
		ImageID:      containerStatus.ImageID,
		ContainerID:  containerID,
	}
	switch containerStatus.State {
	case kubecontainer.ContainerStateRunning:
		status.State.Running = &api.ContainerStateRunning{StartedAt: unversioned.NewTime(containerStatus.StartedAt)}
	case kubecontainer.ContainerStateExited:
		status.State.Terminated = &api.ContainerStateTerminated{
			ExitCode:    containerStatus.ExitCode,
			Reason:      containerStatus.Reason,
			Message:     containerStatus.Message,
			StartedAt:   unversioned.NewTime(containerStatus.StartedAt),
			FinishedAt:  unversioned.NewTime(containerStatus.FinishedAt),
			ContainerID: containerID,
		}
	default:
		status.State.Waiting = &api.ContainerStateWaiting{}
	}
	return &status
}
Example #4
func (a *Api) getNodeMetrics(node string) *v1alpha1.NodeMetrics {
	batch := a.metricSink.GetLatestDataBatch()
	if batch == nil {
		return nil
	}

	ms, found := batch.MetricSets[core.NodeKey(node)]
	if !found {
		return nil
	}

	usage, err := parseResourceList(ms)
	if err != nil {
		return nil
	}

	return &v1alpha1.NodeMetrics{
		ObjectMeta: kube_v1.ObjectMeta{
			Name:              node,
			CreationTimestamp: kube_unversioned.NewTime(time.Now()),
		},
		Timestamp: kube_unversioned.NewTime(batch.Timestamp),
		Window:    kube_unversioned.Duration{Duration: time.Minute},
		Usage:     usage,
	}
}
Example #5
func (r *runtime) getContainerStatus(container ContainerStatus, image, imageID string) api.ContainerStatus {
	var status api.ContainerStatus

	_, _, _, containerName, _, err := r.parseHyperContainerFullName(container.Name)
	if err != nil {
		return status
	}

	status.Name = strings.Split(containerName, ".")[0]
	status.ContainerID = r.buildContainerID(container.ContainerID)
	status.Image = image
	status.ImageID = imageID

	switch container.Phase {
	case StatusRunning:
		runningStartedAt, err := parseTimeString(container.Running.StartedAt)
		if err != nil {
			glog.Errorf("Hyper: can't parse runningStartedAt %s", container.Running.StartedAt)
			return status
		}

		status.State = api.ContainerState{
			Running: &api.ContainerStateRunning{
				StartedAt: unversioned.NewTime(runningStartedAt),
			},
		}
	case StatusPending:
		status.State = api.ContainerState{
			Waiting: &api.ContainerStateWaiting{
				Reason: container.Waiting.Reason,
			},
		}
	case StatusFailed, StatusSuccess:
		terminatedStartedAt, err := parseTimeString(container.Terminated.StartedAt)
		if err != nil {
			glog.Errorf("Hyper: can't parse terminatedStartedAt %s", container.Terminated.StartedAt)
			return status
		}

		terminatedFinishedAt, err := parseTimeString(container.Terminated.FinishedAt)
		if err != nil {
			glog.Errorf("Hyper: can't parse terminatedFinishedAt %s", container.Terminated.FinishedAt)
			return status
		}

		status.State = api.ContainerState{
			Terminated: &api.ContainerStateTerminated{
				ExitCode:   container.Terminated.ExitCode,
				Reason:     container.Terminated.Reason,
				Message:    container.Terminated.Message,
				StartedAt:  unversioned.NewTime(terminatedStartedAt),
				FinishedAt: unversioned.NewTime(terminatedFinishedAt),
			},
		}
	default:
		glog.Warningf("Hyper: Unknown pod state: %q", container.Phase)
	}

	return status
}
Example #6
func (m *MetricStorage) getPodMetrics(pod *api.Pod) *metrics.PodMetrics {
	batch := m.metricSink.GetLatestDataBatch()
	if batch == nil {
		return nil
	}

	res := &metrics.PodMetrics{
		ObjectMeta: api.ObjectMeta{
			Name:              pod.Name,
			Namespace:         pod.Namespace,
			CreationTimestamp: unversioned.NewTime(time.Now()),
		},
		Timestamp:  unversioned.NewTime(batch.Timestamp),
		Window:     unversioned.Duration{Duration: time.Minute},
		Containers: make([]metrics.ContainerMetrics, 0),
	}

	for _, c := range pod.Spec.Containers {
		ms, found := batch.MetricSets[core.PodContainerKey(pod.Namespace, pod.Name, c.Name)]
		if !found {
			glog.Infof("No metrics for container %s in pod %s/%s", c.Name, pod.Namespace, pod.Name)
			return nil
		}
		usage, err := util.ParseResourceList(ms)
		if err != nil {
			return nil
		}
		res.Containers = append(res.Containers, metrics.ContainerMetrics{Name: c.Name, Usage: usage})
	}

	return res
}
Example #7
func (m *MetricStorage) getNodeMetrics(node string) *metrics.NodeMetrics {
	batch := m.metricSink.GetLatestDataBatch()
	if batch == nil {
		return nil
	}

	ms, found := batch.MetricSets[core.NodeKey(node)]
	if !found {
		return nil
	}

	usage, err := util.ParseResourceList(ms)
	if err != nil {
		return nil
	}

	return &metrics.NodeMetrics{
		ObjectMeta: api.ObjectMeta{
			Name:              node,
			CreationTimestamp: unversioned.NewTime(time.Now()),
		},
		Timestamp: unversioned.NewTime(batch.Timestamp),
		Window:    unversioned.Duration{Duration: time.Minute},
		Usage:     usage,
	}
}
Example #8
// BeforeDelete tests whether the object can be gracefully deleted. If graceful is set the object
// should be gracefully deleted, if gracefulPending is set the object has already been gracefully deleted
// (and the provided grace period is longer than the time to deletion), and an error is returned if the
// condition cannot be checked or the gracePeriodSeconds is invalid. The options argument may be updated with
// default values if graceful is true. The other place where we set deletionTimestamp is pkg/registry/generic/registry/store.go:
// this function is responsible for setting deletionTimestamp during graceful deletion, the other one for cascading deletions.
func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Object, options *api.DeleteOptions) (graceful, gracefulPending bool, err error) {
	objectMeta, gvk, kerr := objectMetaAndKind(strategy, obj)
	if kerr != nil {
		return false, false, kerr
	}
	// Checking the Preconditions here to fail early. They'll be enforced later on when we actually do the deletion, too.
	if options.Preconditions != nil && options.Preconditions.UID != nil && *options.Preconditions.UID != objectMeta.UID {
		return false, false, errors.NewConflict(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, objectMeta.Name, fmt.Errorf("the UID in the precondition (%s) does not match the UID in record (%s). The object might have been deleted and then recreated", *options.Preconditions.UID, objectMeta.UID))
	}
	gracefulStrategy, ok := strategy.(RESTGracefulDeleteStrategy)
	if !ok {
		// If we're not deleting gracefully there's no point in updating Generation, as we won't update
		// the object before deleting it.
		return false, false, nil
	}
	// if the object is already being deleted, no need to update generation.
	if objectMeta.DeletionTimestamp != nil {
		// if we are already being deleted, we may only shorten the deletion grace period
		// this means the object was gracefully deleted previously but deletionGracePeriodSeconds was not set,
		// so we force deletion immediately
		if objectMeta.DeletionGracePeriodSeconds == nil {
			return false, false, nil
		}
		// only a shorter grace period may be provided by a user
		if options.GracePeriodSeconds != nil {
			period := int64(*options.GracePeriodSeconds)
			if period >= *objectMeta.DeletionGracePeriodSeconds {
				return false, true, nil
			}
			newDeletionTimestamp := unversioned.NewTime(
				objectMeta.DeletionTimestamp.Add(-time.Second * time.Duration(*objectMeta.DeletionGracePeriodSeconds)).
					Add(time.Second * time.Duration(*options.GracePeriodSeconds)))
			objectMeta.DeletionTimestamp = &newDeletionTimestamp
			objectMeta.DeletionGracePeriodSeconds = &period
			return true, false, nil
		}
		// graceful deletion is pending, do nothing
		options.GracePeriodSeconds = objectMeta.DeletionGracePeriodSeconds
		return false, true, nil
	}

	if !gracefulStrategy.CheckGracefulDelete(ctx, obj, options) {
		return false, false, nil
	}
	now := unversioned.NewTime(unversioned.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds)))
	objectMeta.DeletionTimestamp = &now
	objectMeta.DeletionGracePeriodSeconds = options.GracePeriodSeconds
	// If it's the first graceful deletion we are going to set the DeletionTimestamp to non-nil.
	// Controllers of the object that's being deleted shouldn't take any nontrivial actions, hence its behavior changes.
	// Thus we need to bump object's Generation (if set). This handles generation bump during graceful deletion.
	// The bump for objects that don't support graceful deletion is handled in pkg/registry/generic/registry/store.go.
	if objectMeta.Generation > 0 {
		objectMeta.Generation++
	}
	return true, false, nil
}
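When a graceful deletion is already in progress and the caller supplies a shorter grace period, the code above recomputes DeletionTimestamp as the original deletion start time (old timestamp minus old grace period) plus the new period. A small sketch of just that arithmetic, using hypothetical values rather than real API objects:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical scenario: the object was deleted at deletionStart with a 30s grace
	// period, so its DeletionTimestamp was set to deletionStart + 30s.
	deletionStart := time.Date(2016, 7, 1, 12, 0, 0, 0, time.UTC)
	oldGrace := int64(30)
	oldDeletionTimestamp := deletionStart.Add(time.Duration(oldGrace) * time.Second)

	// A user now asks for a shorter 10s grace period. As in BeforeDelete above, the new
	// timestamp is: old timestamp - old grace + new grace, i.e. deletionStart + new grace.
	newGrace := int64(10)
	newDeletionTimestamp := oldDeletionTimestamp.
		Add(-time.Second * time.Duration(oldGrace)).
		Add(time.Second * time.Duration(newGrace))

	fmt.Println(newDeletionTimestamp.Equal(deletionStart.Add(10 * time.Second))) // true
}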
Example #9
func TestBuildDecorator(t *testing.T) {
	build := &buildapi.Build{
		ObjectMeta: kapi.ObjectMeta{Name: "buildid", Namespace: "default"},
		Spec: buildapi.BuildSpec{
			Source: buildapi.BuildSource{
				Git: &buildapi.GitBuildSource{
					URI: "http://github.com/my/repository",
				},
				ContextDir: "context",
			},
			Strategy: buildapi.BuildStrategy{
				DockerStrategy: &buildapi.DockerBuildStrategy{},
			},
			Output: buildapi.BuildOutput{
				To: &kapi.ObjectReference{
					Kind: "DockerImage",
					Name: "repository/data",
				},
			},
		},
		Status: buildapi.BuildStatus{
			Phase: buildapi.BuildPhaseNew,
		},
	}
	now := unversioned.Now()
	startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
	build.Status.StartTimestamp = &startTime
	err := Decorator(build)
	if err != nil {
		t.Errorf("Unexpected error decorating build")
	}
	if build.Status.Duration <= 0 {
		t.Errorf("Build duration should be greater than zero")
	}
}
Example #10
func (sb *summaryBuilder) containerInfoV2ToNetworkStats(info *cadvisorapiv2.ContainerInfo) *stats.NetworkStats {
	if !info.Spec.HasNetwork {
		return nil
	}
	cstat, found := sb.latestContainerStats(info)
	if !found {
		return nil
	}
	var (
		rxBytes  uint64
		rxErrors uint64
		txBytes  uint64
		txErrors uint64
	)
	// TODO(stclair): check for overflow
	for _, inter := range cstat.Network.Interfaces {
		rxBytes += inter.RxBytes
		rxErrors += inter.RxErrors
		txBytes += inter.TxBytes
		txErrors += inter.TxErrors
	}
	return &stats.NetworkStats{
		Time:     unversioned.NewTime(cstat.Timestamp),
		RxBytes:  &rxBytes,
		RxErrors: &rxErrors,
		TxBytes:  &txBytes,
		TxErrors: &txErrors,
	}
}
Example #11
// TestGracefulStoreCanDeleteIfExistingGracePeriodZero tests recovery from
// a race condition where the graceful delete was unable to complete
// in a prior operation, but the pod remains with a deletion timestamp
// and grace period set to 0.
func TestGracefulStoreCanDeleteIfExistingGracePeriodZero(t *testing.T) {
	deletionTimestamp := unversioned.NewTime(time.Now())
	deletionGracePeriodSeconds := int64(0)
	initialGeneration := int64(1)
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:                       "foo",
			Generation:                 initialGeneration,
			DeletionGracePeriodSeconds: &deletionGracePeriodSeconds,
			DeletionTimestamp:          &deletionTimestamp,
		},
		Spec: api.PodSpec{NodeName: "machine"},
	}

	testContext := api.WithNamespace(api.NewContext(), "test")
	destroyFunc, registry := NewTestGenericStoreRegistry(t)
	defaultDeleteStrategy := testRESTStrategy{api.Scheme, api.SimpleNameGenerator, true, false, true}
	registry.DeleteStrategy = testGracefulStrategy{defaultDeleteStrategy}
	defer destroyFunc()

	graceful, gracefulPending, err := rest.BeforeDelete(registry.DeleteStrategy, testContext, pod, api.NewDeleteOptions(0))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if graceful {
		t.Fatalf("graceful should be false if object has DeletionTimestamp and DeletionGracePeriodSeconds is 0")
	}
	if gracefulPending {
		t.Fatalf("gracefulPending should be false if object has DeletionTimestamp and DeletionGracePeriodSeconds is 0")
	}
}
Example #12
func (sb *summaryBuilder) containerInfoV2ToStats(
	name string,
	info *cadvisorapiv2.ContainerInfo) ContainerStats {
	stats := ContainerStats{
		Name:      name,
		StartTime: unversioned.NewTime(info.Spec.CreationTime),
	}
	cstat, found := sb.latestContainerStats(info)
	if !found {
		return stats
	}
	if info.Spec.HasCpu {
		cpuStats := CPUStats{}
		if cstat.CpuInst != nil {
			cpuStats.UsageNanoCores = &cstat.CpuInst.Usage.Total
		}
		if cstat.Cpu != nil {
			cpuStats.UsageCoreNanoSeconds = &cstat.Cpu.Usage.Total
		}
		stats.CPU = &cpuStats
	}
	if info.Spec.HasMemory {
		pageFaults := cstat.Memory.ContainerData.Pgfault
		majorPageFaults := cstat.Memory.ContainerData.Pgmajfault
		stats.Memory = &MemoryStats{
			UsageBytes:      &cstat.Memory.Usage,
			WorkingSetBytes: &cstat.Memory.WorkingSet,
			PageFaults:      &pageFaults,
			MajorPageFaults: &majorPageFaults,
		}
	}
	sb.containerInfoV2FsStats(info, &stats)
	stats.UserDefinedMetrics = sb.containerInfoV2ToUserDefinedMetrics(info)
	return stats
}
Example #13
func TestFilterBeforePredicate(t *testing.T) {
	youngerThan := time.Hour
	now := unversioned.Now()
	old := unversioned.NewTime(now.Time.Add(-1 * youngerThan))
	builds := []*buildapi.Build{
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "old",
				CreationTimestamp: old,
			},
		},
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "new",
				CreationTimestamp: now,
			},
		},
	}
	filter := &andFilter{
		filterPredicates: []FilterPredicate{NewFilterBeforePredicate(youngerThan)},
	}
	result := filter.Filter(builds)
	if len(result) != 1 {
		t.Errorf("Unexpected number of results")
	}
	if expected, actual := "old", result[0].Name; expected != actual {
		t.Errorf("expected %v, actual %v", expected, actual)
	}
}
Example #14
func TestNodeConditionsObservedSince(t *testing.T) {
	now := unversioned.Now()
	observedTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
	testCases := map[string]struct {
		observedAt nodeConditionsObservedAt
		period     time.Duration
		now        time.Time
		result     []api.NodeConditionType
	}{
		"in-period": {
			observedAt: nodeConditionsObservedAt{
				api.NodeMemoryPressure: observedTime.Time,
			},
			period: 2 * time.Minute,
			now:    now.Time,
			result: []api.NodeConditionType{api.NodeMemoryPressure},
		},
		"out-of-period": {
			observedAt: nodeConditionsObservedAt{
				api.NodeMemoryPressure: observedTime.Time,
			},
			period: 30 * time.Second,
			now:    now.Time,
			result: []api.NodeConditionType{},
		},
	}
	for testName, testCase := range testCases {
		actual := nodeConditionsObservedSince(testCase.observedAt, testCase.period, testCase.now)
		if !nodeConditionList(actual).Equal(nodeConditionList(testCase.result)) {
			t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual)
		}
	}
}
Example #15
// buildSummaryPods aggregates and returns the container stats in cinfos by the Pod managing the container.
// Containers not managed by a Pod are omitted.
func (sb *summaryBuilder) buildSummaryPods() []PodStats {
	// Map each container to a pod and update the PodStats with container data
	podToStats := map[PodReference]*PodStats{}
	for _, cinfo := range sb.infos {
		// Build the Pod key if this container is managed by a Pod
		if !sb.isPodManagedContainer(&cinfo) {
			continue
		}
		ref := sb.buildPodRef(&cinfo)

		// Lookup the PodStats for the pod using the PodRef.  If none exists, initialize a new entry.
		stats, found := podToStats[ref]
		if !found {
			stats = &PodStats{PodRef: ref}
			podToStats[ref] = stats
		}

		// Update the PodStats entry with the stats from the container by adding it to stats.Containers
		containerName := dockertools.GetContainerName(cinfo.Spec.Labels)
		if containerName == leaky.PodInfraContainerName {
			// Special case for infrastructure container which is hidden from the user and has network stats
			stats.Network = sb.containerInfoV2ToNetworkStats(&cinfo)
			stats.StartTime = unversioned.NewTime(cinfo.Spec.CreationTime)
		} else {
			stats.Containers = append(stats.Containers, sb.containerInfoV2ToStats(containerName, &cinfo))
		}
	}

	// Add each PodStats to the result
	result := make([]PodStats, 0, len(podToStats))
	for _, stats := range podToStats {
		result = append(result, *stats)
	}
	return result
}
Example #16
// NewFilterBeforePredicate is a function that returns true if the replication controller was created before the current time minus the specified duration
func NewFilterBeforePredicate(d time.Duration) FilterPredicate {
	now := unversioned.Now()
	before := unversioned.NewTime(now.Time.Add(-1 * d))
	return func(item *kapi.ReplicationController) bool {
		return item.CreationTimestamp.Before(before)
	}
}
Example #17
// NewFilterBeforePredicate is a function that returns true if the build was created before the current time minus the specified duration
func NewFilterBeforePredicate(d time.Duration) FilterPredicate {
	now := unversioned.Now()
	before := unversioned.NewTime(now.Time.Add(-1 * d))
	return func(build *buildapi.Build) bool {
		return build.CreationTimestamp.Before(before)
	}
}
Example #18
func sizedImage(id, ref string, size int64) imageapi.Image {
	image := imageWithLayers(id, ref, false, layer1, layer2, layer3, layer4, layer5)
	image.CreationTimestamp = unversioned.NewTime(unversioned.Now().Add(time.Duration(-1) * time.Minute))
	image.DockerImageMetadata.Size = size

	return image
}
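The "some minutes in the past" construction used in sizedImage recurs throughout these examples (see also #13, #20, #24 and #28). A hedged sketch of a helper that factors it out; minutesAgo is a hypothetical name, not part of any of the projects quoted here:

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

// minutesAgo wraps "now minus m minutes" in the API's serializable Time type.
func minutesAgo(m int64) unversioned.Time {
	return unversioned.NewTime(unversioned.Now().Add(time.Duration(-m) * time.Minute))
}

func main() {
	fmt.Println(minutesAgo(5)) // roughly five minutes before the current time
}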
Example #19
func TestScrapeSummaryMetrics(t *testing.T) {
	summary := stats.Summary{
		Node: stats.NodeStats{
			NodeName:  nodeInfo.NodeName,
			StartTime: unversioned.NewTime(startTime),
		},
	}
	data, err := json.Marshal(&summary)
	require.NoError(t, err)

	server := httptest.NewServer(&util.FakeHandler{
		StatusCode:   200,
		ResponseBody: string(data),
		T:            t,
	})
	defer server.Close()

	ms := testingSummaryMetricsSource()
	split := strings.SplitN(strings.Replace(server.URL, "http://", "", 1), ":", 2)
	ms.node.IP = split[0]
	ms.node.Port, err = strconv.Atoi(split[1])
	require.NoError(t, err)

	res := ms.ScrapeMetrics(time.Now(), time.Now())
	assert.Equal(t, res.MetricSets["node:test"].Labels[core.LabelMetricSetType.Key], core.MetricSetTypeNode)
}
Example #20
// TestSort verifies that replication controllers are sorted by most recent creation time
func TestSort(t *testing.T) {
	present := unversioned.Now()
	past := unversioned.NewTime(present.Time.Add(-1 * time.Minute))
	controllers := []*kapi.ReplicationController{
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "past",
				CreationTimestamp: past,
			},
		},
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "present",
				CreationTimestamp: present,
			},
		},
	}
	sort.Sort(sortableReplicationControllers(controllers))
	if controllers[0].Name != "present" {
		t.Errorf("Unexpected sort order")
	}
	if controllers[1].Name != "past" {
		t.Errorf("Unexpected sort order")
	}
}
Example #21
// BeforeDelete tests whether the object can be gracefully deleted. If graceful is set the object
// should be gracefully deleted, if gracefulPending is set the object has already been gracefully deleted
// (and the provided grace period is longer than the time to deletion), and an error is returned if the
// condition cannot be checked or the gracePeriodSeconds is invalid. The options argument may be updated with
// default values if graceful is true.
func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Object, options *api.DeleteOptions) (graceful, gracefulPending bool, err error) {
	objectMeta, gvk, kerr := objectMetaAndKind(strategy, obj)
	if kerr != nil {
		return false, false, kerr
	}
	// Checking the Preconditions here to fail early. They'll be enforced later on when we actually do the deletion, too.
	if options.Preconditions != nil && options.Preconditions.UID != nil && *options.Preconditions.UID != objectMeta.UID {
		return false, false, errors.NewConflict(unversioned.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, objectMeta.Name, fmt.Errorf("the UID in the precondition (%s) does not match the UID in record (%s). The object might have been deleted and then recreated", *options.Preconditions.UID, objectMeta.UID))
	}
	gracefulStrategy, ok := strategy.(RESTGracefulDeleteStrategy)
	if !ok {
		return false, false, nil
	}
	// if the object is already being deleted
	if objectMeta.DeletionTimestamp != nil {
		// if we are already being deleted, we may only shorten the deletion grace period
		// this means the object was gracefully deleted previously but deletionGracePeriodSeconds was not set,
		// so we force deletion immediately
		if objectMeta.DeletionGracePeriodSeconds == nil {
			return false, false, nil
		}
		// only a shorter grace period may be provided by a user
		if options.GracePeriodSeconds != nil {
			period := int64(*options.GracePeriodSeconds)
			if period > *objectMeta.DeletionGracePeriodSeconds {
				return false, true, nil
			}
			now := unversioned.NewTime(unversioned.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds)))
			objectMeta.DeletionTimestamp = &now
			objectMeta.DeletionGracePeriodSeconds = &period
			options.GracePeriodSeconds = &period
			return true, false, nil
		}
		// graceful deletion is pending, do nothing
		options.GracePeriodSeconds = objectMeta.DeletionGracePeriodSeconds
		return false, true, nil
	}

	if !gracefulStrategy.CheckGracefulDelete(obj, options) {
		return false, false, nil
	}
	now := unversioned.NewTime(unversioned.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds)))
	objectMeta.DeletionTimestamp = &now
	objectMeta.DeletionGracePeriodSeconds = options.GracePeriodSeconds
	return true, false, nil
}
Example #22
func (sb *summaryBuilder) containerInfoV2ToStats(
	name string,
	info *cadvisorapiv2.ContainerInfo) stats.ContainerStats {
	cStats := stats.ContainerStats{
		StartTime: unversioned.NewTime(info.Spec.CreationTime),
		Name:      name,
	}
	cstat, found := sb.latestContainerStats(info)
	if !found {
		return cStats
	}
	if info.Spec.HasCpu {
		cpuStats := stats.CPUStats{
			Time: unversioned.NewTime(cstat.Timestamp),
		}
		if cstat.CpuInst != nil {
			cpuStats.UsageNanoCores = &cstat.CpuInst.Usage.Total
		}
		if cstat.Cpu != nil {
			cpuStats.UsageCoreNanoSeconds = &cstat.Cpu.Usage.Total
		}
		cStats.CPU = &cpuStats
	}
	if info.Spec.HasMemory {
		pageFaults := cstat.Memory.ContainerData.Pgfault
		majorPageFaults := cstat.Memory.ContainerData.Pgmajfault
		cStats.Memory = &stats.MemoryStats{
			Time:            unversioned.NewTime(cstat.Timestamp),
			UsageBytes:      &cstat.Memory.Usage,
			WorkingSetBytes: &cstat.Memory.WorkingSet,
			RSSBytes:        &cstat.Memory.RSS,
			PageFaults:      &pageFaults,
			MajorPageFaults: &majorPageFaults,
		}
		// availableBytes = memory limit (if known) - working set
		if !isMemoryUnlimited(info.Spec.Memory.Limit) {
			availableBytes := info.Spec.Memory.Limit - cstat.Memory.WorkingSet
			cStats.Memory.AvailableBytes = &availableBytes
		}
	}

	sb.containerInfoV2FsStats(info, &cStats)
	cStats.UserDefinedMetrics = sb.containerInfoV2ToUserDefinedMetrics(info)
	return cStats
}
Example #23
// Set Ready condition for the node.
func (kl *Kubelet) setNodeReadyCondition(node *api.Node) {
	// NOTE(aaronlevy): NodeReady condition needs to be the last in the list of node conditions.
	// This is due to an issue with version skewed kubelet and master components.
	// ref: https://github.com/kubernetes/kubernetes/issues/16961
	currentTime := unversioned.NewTime(kl.clock.Now())
	var newNodeReadyCondition api.NodeCondition
	if rs := kl.runtimeState.errors(); len(rs) == 0 {
		newNodeReadyCondition = api.NodeCondition{
			Type:              api.NodeReady,
			Status:            api.ConditionTrue,
			Reason:            "KubeletReady",
			Message:           "kubelet is posting ready status",
			LastHeartbeatTime: currentTime,
		}
	} else {
		newNodeReadyCondition = api.NodeCondition{
			Type:              api.NodeReady,
			Status:            api.ConditionFalse,
			Reason:            "KubeletNotReady",
			Message:           strings.Join(rs, ","),
			LastHeartbeatTime: currentTime,
		}
	}

	// Record any soft requirements that were not met in the container manager.
	status := kl.containerManager.Status()
	if status.SoftRequirements != nil {
		newNodeReadyCondition.Message = fmt.Sprintf("%s. WARNING: %s", newNodeReadyCondition.Message, status.SoftRequirements.Error())
	}

	readyConditionUpdated := false
	needToRecordEvent := false
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == api.NodeReady {
			if node.Status.Conditions[i].Status == newNodeReadyCondition.Status {
				newNodeReadyCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime
			} else {
				newNodeReadyCondition.LastTransitionTime = currentTime
				needToRecordEvent = true
			}
			node.Status.Conditions[i] = newNodeReadyCondition
			readyConditionUpdated = true
			break
		}
	}
	if !readyConditionUpdated {
		newNodeReadyCondition.LastTransitionTime = currentTime
		node.Status.Conditions = append(node.Status.Conditions, newNodeReadyCondition)
	}
	if needToRecordEvent {
		if newNodeReadyCondition.Status == api.ConditionTrue {
			kl.recordNodeStatusEvent(api.EventTypeNormal, events.NodeReady)
		} else {
			kl.recordNodeStatusEvent(api.EventTypeNormal, events.NodeNotReady)
		}
	}
}
Example #24
func agedImage(id, ref string, ageInMinutes int64) imageapi.Image {
	image := imageWithLayers(id, ref, false, layer1, layer2, layer3, layer4, layer5)

	if ageInMinutes >= 0 {
		image.CreationTimestamp = unversioned.NewTime(unversioned.Now().Add(time.Duration(-1*ageInMinutes) * time.Minute))
	}

	return image
}
Example #25
func genTestSummaryNetwork(seed int) *stats.NetworkStats {
	return &stats.NetworkStats{
		Time:     unversioned.NewTime(scrapeTime),
		RxBytes:  uint64Val(seed, offsetNetRxBytes),
		RxErrors: uint64Val(seed, offsetNetRxErrors),
		TxBytes:  uint64Val(seed, offsetNetTxBytes),
		TxErrors: uint64Val(seed, offsetNetTxErrors),
	}
}
Example #26
func genTestSummaryMemory(seed int) *stats.MemoryStats {
	return &stats.MemoryStats{
		Time:            unversioned.NewTime(scrapeTime),
		UsageBytes:      uint64Val(seed, offsetMemUsageBytes),
		WorkingSetBytes: uint64Val(seed, offsetMemWorkingSetBytes),
		PageFaults:      uint64Val(seed, offsetMemPageFaults),
		MajorPageFaults: uint64Val(seed, offsetMemMajorPageFaults),
	}
}
Example #27
func genTestSummaryCPU(seed int) *stats.CPUStats {
	cpu := stats.CPUStats{
		Time:                 unversioned.NewTime(scrapeTime),
		UsageNanoCores:       uint64Val(seed, offsetCPUUsageCores),
		UsageCoreNanoSeconds: uint64Val(seed, offsetCPUUsageCoreSeconds),
	}
	*cpu.UsageNanoCores *= uint64(time.Millisecond.Nanoseconds())
	return &cpu
}
Example #28
func TestThresholdsMetGracePeriod(t *testing.T) {
	now := unversioned.Now()
	hardThreshold := Threshold{
		Signal:   SignalMemoryAvailable,
		Operator: OpLessThan,
		Value: ThresholdValue{
			Quantity: quantityMustParse("1Gi"),
		},
	}
	softThreshold := Threshold{
		Signal:   SignalMemoryAvailable,
		Operator: OpLessThan,
		Value: ThresholdValue{
			Quantity: quantityMustParse("2Gi"),
		},
		GracePeriod: 1 * time.Minute,
	}
	oldTime := unversioned.NewTime(now.Time.Add(-2 * time.Minute))
	testCases := map[string]struct {
		observedAt thresholdsObservedAt
		now        time.Time
		result     []Threshold
	}{
		"empty": {
			observedAt: thresholdsObservedAt{},
			now:        now.Time,
			result:     []Threshold{},
		},
		"hard-threshold-met": {
			observedAt: thresholdsObservedAt{
				hardThreshold: now.Time,
			},
			now:    now.Time,
			result: []Threshold{hardThreshold},
		},
		"soft-threshold-not-met": {
			observedAt: thresholdsObservedAt{
				softThreshold: now.Time,
			},
			now:    now.Time,
			result: []Threshold{},
		},
		"soft-threshold-met": {
			observedAt: thresholdsObservedAt{
				softThreshold: oldTime.Time,
			},
			now:    now.Time,
			result: []Threshold{softThreshold},
		},
	}
	for testName, testCase := range testCases {
		actual := thresholdsMetGracePeriod(testCase.observedAt, now.Time)
		if !thresholdList(actual).Equal(thresholdList(testCase.result)) {
			t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual)
		}
	}
}
Example #29
func TestRestartingPodWarning(t *testing.T) {
	g, _, err := osgraphtest.BuildGraph("../../../api/graph/test/restarting-pod.yaml")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	defer func() { nowFn = unversioned.Now }()

	recent, _ := time.Parse(time.RFC3339, "2015-07-13T19:36:06Z")
	nowFn = func() unversioned.Time { return unversioned.NewTime(recent.UTC()) }
	markers := FindRestartingPods(g, osgraph.DefaultNamer, "oc logs", "oadm policy")
	sort.Sort(osgraph.BySeverity(markers))
	if e, a := 4, len(markers); e != a {
		t.Fatalf("expected %v, got %v", e, a)
	}
	if e, a := CrashLoopingPodError, markers[0].Key; e != a {
		t.Fatalf("expected %v, got %v", e, a)
	}
	if e, a := CrashLoopingPodError, markers[1].Key; e != a {
		t.Fatalf("expected %v, got %v", e, a)
	}
	if e, a := RestartingPodWarning, markers[2].Key; e != a {
		t.Fatalf("expected %v, got %v", e, a)
	}
	if e, a := RestartingPodWarning, markers[3].Key; e != a {
		t.Fatalf("expected %v, got %v", e, a)
	}

	future, _ := time.Parse(time.RFC3339, "2015-07-13T19:46:06Z")
	nowFn = func() unversioned.Time { return unversioned.NewTime(future.UTC()) }
	markers = FindRestartingPods(g, osgraph.DefaultNamer, "oc logs", "oadm policy")
	sort.Sort(osgraph.BySeverity(markers))
	if e, a := 3, len(markers); e != a {
		t.Fatalf("expected %v, got %v", e, a)
	}
	if e, a := CrashLoopingPodError, markers[0].Key; e != a {
		t.Fatalf("expected %v, got %v", e, a)
	}
	if e, a := CrashLoopingPodError, markers[1].Key; e != a {
		t.Fatalf("expected %v, got %v", e, a)
	}
	if e, a := RestartingPodWarning, markers[2].Key; e != a {
		t.Fatalf("expected %v, got %v", e, a)
	}
}
Example #30
// setNodeInodePressureCondition for the node.
// TODO: this needs to move somewhere centralized...
func (kl *Kubelet) setNodeInodePressureCondition(node *api.Node) {
	currentTime := unversioned.NewTime(kl.clock.Now())
	var condition *api.NodeCondition

	// Check if NodeInodePressure condition already exists and if it does, just pick it up for update.
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == api.NodeInodePressure {
			condition = &node.Status.Conditions[i]
		}
	}

	newCondition := false
	// If the NodeInodePressure condition doesn't exist, create one
	if condition == nil {
		condition = &api.NodeCondition{
			Type:   api.NodeInodePressure,
			Status: api.ConditionUnknown,
		}
		// cannot be appended to node.Status.Conditions here because it gets
		// copied to the slice. So if we append to the slice here none of the
		// updates we make below are reflected in the slice.
		newCondition = true
	}

	// Update the heartbeat time
	condition.LastHeartbeatTime = currentTime

	// Note: The conditions below take care of the case when a new NodeInodePressure condition is
	// created and as well as the case when the condition already exists. When a new condition
	// is created its status is set to api.ConditionUnknown which matches either
	// condition.Status != api.ConditionTrue or
	// condition.Status != api.ConditionFalse in the conditions below depending on whether
	// the kubelet is under inode pressure or not.
	if kl.evictionManager.IsUnderInodePressure() {
		if condition.Status != api.ConditionTrue {
			condition.Status = api.ConditionTrue
			condition.Reason = "KubeletHasInodePressure"
			condition.Message = "kubelet has inode pressure"
			condition.LastTransitionTime = currentTime
			kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasInodePressure")
		}
	} else {
		if condition.Status != api.ConditionFalse {
			condition.Status = api.ConditionFalse
			condition.Reason = "KubeletHasNoInodePressure"
			condition.Message = "kubelet has no inode pressure"
			condition.LastTransitionTime = currentTime
			kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasNoInodePressure")
		}
	}

	if newCondition {
		node.Status.Conditions = append(node.Status.Conditions, *condition)
	}

}