Example #1
// NewFilterBeforePredicate returns a FilterPredicate that is true for items created before the current time minus the specified duration
func NewFilterBeforePredicate(d time.Duration) FilterPredicate {
	now := util.Now()
	before := util.NewTime(now.Time.Add(-1 * d))
	return func(item *kapi.ReplicationController) bool {
		return item.CreationTimestamp.Before(before)
	}
}
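The predicate above captures the cutoff once, so every item is compared against the same reference time. A minimal standalone sketch of that closure pattern, using only the standard library (the item type and helper below are illustrative assumptions, not part of the original package):

package main

import (
	"fmt"
	"time"
)

type item struct {
	name    string
	created time.Time
}

// newBeforePredicate computes the cutoff once and returns a closure that
// reports whether an item was created before it.
func newBeforePredicate(d time.Duration) func(item) bool {
	cutoff := time.Now().Add(-d)
	return func(it item) bool {
		return it.created.Before(cutoff)
	}
}

func main() {
	olderThanHour := newBeforePredicate(time.Hour)
	old := item{name: "old", created: time.Now().Add(-2 * time.Hour)}
	fresh := item{name: "fresh", created: time.Now()}
	fmt.Println(olderThanHour(old), olderThanHour(fresh)) // true false
}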
Example #2
// TestSort verifies that replication controllers are sorted by most recently created
func TestSort(t *testing.T) {
	present := util.Now()
	past := util.NewTime(present.Time.Add(-1 * time.Minute))
	controllers := []*kapi.ReplicationController{
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "past",
				CreationTimestamp: past,
			},
		},
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "present",
				CreationTimestamp: present,
			},
		},
	}
	sort.Sort(sortableReplicationControllers(controllers))
	if controllers[0].Name != "present" {
		t.Errorf("Unexpected sort order")
	}
	if controllers[1].Name != "past" {
		t.Errorf("Unexpected sort order")
	}
}
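The sortableReplicationControllers type used by this test is not shown on this page. A plausible standalone sketch of such a sort.Interface, ordering newest first as the assertions expect (the type and names below are assumptions for illustration):

package main

import (
	"fmt"
	"sort"
	"time"
)

type item struct {
	Name    string
	Created time.Time
}

// byNewestFirst sorts items so that the most recently created comes first.
type byNewestFirst []item

func (s byNewestFirst) Len() int      { return len(s) }
func (s byNewestFirst) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byNewestFirst) Less(i, j int) bool {
	return s[j].Created.Before(s[i].Created)
}

func main() {
	now := time.Now()
	items := []item{{"past", now.Add(-time.Minute)}, {"present", now}}
	sort.Sort(byNewestFirst(items))
	fmt.Println(items[0].Name, items[1].Name) // present past
}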
Example #3
// NewFilterBeforePredicate returns a FilterPredicate that is true for builds created before the current time minus the specified duration
func NewFilterBeforePredicate(d time.Duration) FilterPredicate {
	now := util.Now()
	before := util.NewTime(now.Time.Add(-1 * d))
	return func(build *buildapi.Build) bool {
		return build.CreationTimestamp.Before(before)
	}
}
Example #4
func TestFilterBeforePredicate(t *testing.T) {
	youngerThan := time.Hour
	now := util.Now()
	old := util.NewTime(now.Time.Add(-1 * youngerThan))
	builds := []*buildapi.Build{
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "old",
				CreationTimestamp: old,
			},
		},
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "new",
				CreationTimestamp: now,
			},
		},
	}
	filter := &andFilter{
		filterPredicates: []FilterPredicate{NewFilterBeforePredicate(youngerThan)},
	}
	result := filter.Filter(builds)
	if len(result) != 1 {
		t.Errorf("Unexpected number of results")
	}
	if expected, actual := "old", result[0].Name; expected != actual {
		t.Errorf("expected %v, actual %v", expected, actual)
	}
}
Example #5
// BeforeDelete tests whether the object can be gracefully deleted. If graceful is set, the object
// should be gracefully deleted; if gracefulPending is set, the object has already been gracefully deleted
// (and the provided grace period is longer than the time to deletion); an error is returned if the
// condition cannot be checked or the gracePeriodSeconds is invalid. The options argument may be updated with
// default values if graceful is true.
func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Object, options *api.DeleteOptions) (graceful, gracefulPending bool, err error) {
	if strategy == nil {
		return false, false, nil
	}
	objectMeta, _, kerr := objectMetaAndKind(strategy, obj)
	if kerr != nil {
		return false, false, kerr
	}

	// if the object is already being deleted
	if objectMeta.DeletionTimestamp != nil {
		// if we are already being deleted, we may only shorten the deletion grace period
		// this means the object was gracefully deleted previously but deletionGracePeriodSeconds was not set,
		// so we force deletion immediately
		if objectMeta.DeletionGracePeriodSeconds == nil {
			return false, false, nil
		}
		// only a shorter grace period may be provided by a user
		if options.GracePeriodSeconds != nil {
			period := int64(*options.GracePeriodSeconds)
			if period > *objectMeta.DeletionGracePeriodSeconds {
				return false, true, nil
			}
			now := util.NewTime(util.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds)))
			objectMeta.DeletionTimestamp = &now
			objectMeta.DeletionGracePeriodSeconds = &period
			options.GracePeriodSeconds = &period
			return true, false, nil
		}
		// graceful deletion is pending, do nothing
		options.GracePeriodSeconds = objectMeta.DeletionGracePeriodSeconds
		return false, true, nil
	}

	if !strategy.CheckGracefulDelete(obj, options) {
		return false, false, nil
	}
	now := util.NewTime(util.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds)))
	objectMeta.DeletionTimestamp = &now
	objectMeta.DeletionGracePeriodSeconds = options.GracePeriodSeconds
	return true, false, nil
}
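As a hedged illustration of the "only a shorter grace period may be provided" branch above, here is a standalone sketch with plain time values; the helper name, signature, and return values are hypothetical, not part of the real API:

package main

import (
	"fmt"
	"time"
)

// shortenGracePeriod applies a new grace-period request against one that is
// already pending: a longer request is ignored (deletion stays as scheduled),
// while an equal or shorter request moves the deadline up.
func shortenGracePeriod(pendingSeconds, requestedSeconds int64, now time.Time) (deadline time.Time, shortened bool) {
	if requestedSeconds > pendingSeconds {
		return now.Add(time.Duration(pendingSeconds) * time.Second), false
	}
	return now.Add(time.Duration(requestedSeconds) * time.Second), true
}

func main() {
	now := time.Now()
	if d, ok := shortenGracePeriod(30, 5, now); ok {
		fmt.Println("deletion moved up to", d)
	}
}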
Example #6
func agedImage(id, ref string, ageInMinutes int64) imageapi.Image {
	image := imageWithLayers(id, ref,
		"tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
		"tarsum.dev+sha256:b194de3772ebbcdc8f244f663669799ac1cb141834b7cb8b69100285d357a2b0",
		"tarsum.dev+sha256:c937c4bb1c1a21cc6d94340812262c6472092028972ae69b551b1a70d4276171",
		"tarsum.dev+sha256:2aaacc362ac6be2b9e9ae8c6029f6f616bb50aec63746521858e47841b90fabd",
		"tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
	)

	if ageInMinutes >= 0 {
		image.CreationTimestamp = util.NewTime(util.Now().Add(time.Duration(-1*ageInMinutes) * time.Minute))
	}

	return image
}
Example #7
func init() {
	err := kapi.Scheme.AddConversionFuncs(
		// Convert docker client object to internal object
		func(in *docker.Image, out *DockerImage, s conversion.Scope) error {
			if err := s.Convert(&in.Config, &out.Config, conversion.AllowDifferentFieldTypeNames); err != nil {
				return err
			}
			if err := s.Convert(&in.ContainerConfig, &out.ContainerConfig, conversion.AllowDifferentFieldTypeNames); err != nil {
				return err
			}
			out.ID = in.ID
			out.Parent = in.Parent
			out.Comment = in.Comment
			out.Created = util.NewTime(in.Created)
			out.Container = in.Container
			out.DockerVersion = in.DockerVersion
			out.Author = in.Author
			out.Architecture = in.Architecture
			out.Size = in.Size
			return nil
		},
		func(in *DockerImage, out *docker.Image, s conversion.Scope) error {
			if err := s.Convert(&in.Config, &out.Config, conversion.AllowDifferentFieldTypeNames); err != nil {
				return err
			}
			if err := s.Convert(&in.ContainerConfig, &out.ContainerConfig, conversion.AllowDifferentFieldTypeNames); err != nil {
				return err
			}
			out.ID = in.ID
			out.Parent = in.Parent
			out.Comment = in.Comment
			out.Created = in.Created.Time
			out.Container = in.Container
			out.DockerVersion = in.DockerVersion
			out.Author = in.Author
			out.Architecture = in.Architecture
			out.Size = in.Size
			return nil
		},
	)
	if err != nil {
		// If one of the conversion functions is malformed, detect it immediately.
		panic(err)
	}
}
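The two conversion functions wrap and unwrap the creation time with util.NewTime(in.Created) and in.Created.Time. A minimal standalone sketch of that wrapper pattern (the Time type below is an illustrative stand-in, not the real util package):

package main

import (
	"fmt"
	"time"
)

// Time embeds time.Time, so wrapping and unwrapping are just struct access.
type Time struct{ time.Time }

func NewTime(t time.Time) Time { return Time{t} }

func main() {
	created := time.Date(2015, time.September, 1, 12, 0, 0, 0, time.UTC)
	wrapped := NewTime(created)              // docker.Image.Created -> DockerImage.Created
	fmt.Println(wrapped.Time.Equal(created)) // true: .Time unwraps the original value
}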
Example #8
func agedPod(namespace, name string, phase kapi.PodPhase, ageInMinutes int64, containerImages ...string) kapi.Pod {
	pod := kapi.Pod{
		ObjectMeta: kapi.ObjectMeta{
			Namespace: namespace,
			Name:      name,
		},
		Spec: podSpec(containerImages...),
		Status: kapi.PodStatus{
			Phase: phase,
		},
	}

	if ageInMinutes >= 0 {
		pod.CreationTimestamp = util.NewTime(util.Now().Add(time.Duration(-1*ageInMinutes) * time.Minute))
	}

	return pod
}
Example #9
func agedStream(registry, namespace, name string, ageInMinutes int64, tags map[string]imageapi.TagEventList) imageapi.ImageStream {
	stream := imageapi.ImageStream{
		ObjectMeta: kapi.ObjectMeta{
			Namespace: namespace,
			Name:      name,
		},
		Status: imageapi.ImageStreamStatus{
			DockerImageRepository: fmt.Sprintf("%s/%s/%s", registry, namespace, name),
			Tags: tags,
		},
	}

	if ageInMinutes >= 0 {
		stream.CreationTimestamp = util.NewTime(util.Now().Add(time.Duration(-1*ageInMinutes) * time.Minute))
	}

	return stream
}
Example #10
func TestFilterBeforePredicate(t *testing.T) {
	youngerThan := time.Hour
	now := util.Now()
	old := util.NewTime(now.Time.Add(-1 * youngerThan))
	items := []*kapi.ReplicationController{}
	items = append(items, withCreated(mockDeployment("a", "old", nil), old))
	items = append(items, withCreated(mockDeployment("a", "new", nil), now))
	filter := &andFilter{
		filterPredicates: []FilterPredicate{NewFilterBeforePredicate(youngerThan)},
	}
	result := filter.Filter(items)
	if len(result) != 1 {
		t.Errorf("Unexpected number of results")
	}
	if expected, actual := "old", result[0].Name; expected != actual {
		t.Errorf("expected %v, actual %v", expected, actual)
	}
}
Example #11
func TestNewStatusPreservesPodStartTime(t *testing.T) {
	syncer := newTestManager()
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Status: api.PodStatus{},
	}
	now := util.Now()
	startTime := util.NewTime(now.Time.Add(-1 * time.Minute))
	pod.Status.StartTime = &startTime
	syncer.SetPodStatus(pod, getRandomPodStatus())

	status, _ := syncer.GetPodStatus(pod.UID)
	if !status.StartTime.Time.Equal(startTime.Time) {
		t.Errorf("Unexpected start time, expected %v, actual %v", startTime, status.StartTime)
	}
}
Example #12
func TestSortBuildPtrSliceByCreationTimestamp(t *testing.T) {
	present := util.Now()
	past := util.NewTime(present.Add(-time.Minute))
	builds := []*Build{
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "present",
				CreationTimestamp: present,
			},
		},
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "past",
				CreationTimestamp: past,
			},
		},
	}
	sort.Sort(BuildPtrSliceByCreationTimestamp(builds))
	if [2]string{builds[0].Name, builds[1].Name} != [2]string{"past", "present"} {
		t.Errorf("Unexpected sort order")
	}
}
Example #13
func TestBuildDecorator(t *testing.T) {
	build := &buildapi.Build{
		ObjectMeta: kapi.ObjectMeta{Name: "buildid", Namespace: "default"},
		Spec: buildapi.BuildSpec{
			Source: buildapi.BuildSource{
				Type: buildapi.BuildSourceGit,
				Git: &buildapi.GitBuildSource{
					URI: "http://github.com/my/repository",
				},
				ContextDir: "context",
			},
			Strategy: buildapi.BuildStrategy{
				Type:           buildapi.DockerBuildStrategyType,
				DockerStrategy: &buildapi.DockerBuildStrategy{},
			},
			Output: buildapi.BuildOutput{
				To: &kapi.ObjectReference{
					Kind: "DockerImage",
					Name: "repository/data",
				},
			},
		},
		Status: buildapi.BuildStatus{
			Phase: buildapi.BuildPhaseNew,
		},
	}
	now := util.Now()
	startTime := util.NewTime(now.Time.Add(-1 * time.Minute))
	build.Status.StartTimestamp = &startTime
	err := Decorator(build)
	if err != nil {
		t.Errorf("Unexpected error decorating build")
	}
	if build.Status.Duration <= 0 {
		t.Errorf("Build duration should be greater than zero")
	}
}
Example #14
func TestPrintEventsResultSorted(t *testing.T) {
	// Arrange
	printer := NewHumanReadablePrinter(false /* noHeaders */, false, false, []string{})

	obj := api.EventList{
		Items: []api.Event{
			{
				Source:         api.EventSource{Component: "kubelet"},
				Message:        "Item 1",
				FirstTimestamp: util.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
				LastTimestamp:  util.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
				Count:          1,
			},
			{
				Source:         api.EventSource{Component: "scheduler"},
				Message:        "Item 2",
				FirstTimestamp: util.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
				LastTimestamp:  util.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
				Count:          1,
			},
			{
				Source:         api.EventSource{Component: "kubelet"},
				Message:        "Item 3",
				FirstTimestamp: util.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
				LastTimestamp:  util.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
				Count:          1,
			},
		},
	}
	buffer := &bytes.Buffer{}

	// Act
	err := printer.PrintObj(&obj, buffer)

	// Assert
	if err != nil {
		t.Fatalf("An error occurred printing the EventList: %#v", err)
	}
	out := buffer.String()
	VerifyDatesInOrder(out, "\n" /* rowDelimiter */, "  " /* columnDelimiter */, t)
}
Example #15
func TestPodDescribeResultsSorted(t *testing.T) {
	// Arrange
	fake := testclient.NewSimpleFake(&api.EventList{
		Items: []api.Event{
			{
				Source:         api.EventSource{Component: "kubelet"},
				Message:        "Item 1",
				FirstTimestamp: util.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
				LastTimestamp:  util.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
				Count:          1,
			},
			{
				Source:         api.EventSource{Component: "scheduler"},
				Message:        "Item 2",
				FirstTimestamp: util.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
				LastTimestamp:  util.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
				Count:          1,
			},
			{
				Source:         api.EventSource{Component: "kubelet"},
				Message:        "Item 3",
				FirstTimestamp: util.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
				LastTimestamp:  util.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
				Count:          1,
			},
		},
	})
	c := &describeClient{T: t, Namespace: "foo", Interface: fake}
	d := PodDescriber{c}

	// Act
	out, err := d.Describe("foo", "bar")

	// Assert
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	VerifyDatesInOrder(out, "\n" /* rowDelimiter */, "\t" /* columnDelimiter */, t)
}
Example #16
func TestSortableEvents(t *testing.T) {
	// Arrange
	list := SortableEvents([]api.Event{
		{
			Source:         api.EventSource{Component: "kubelet"},
			Message:        "Item 1",
			FirstTimestamp: util.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
			LastTimestamp:  util.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
			Count:          1,
		},
		{
			Source:         api.EventSource{Component: "scheduler"},
			Message:        "Item 2",
			FirstTimestamp: util.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
			LastTimestamp:  util.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
			Count:          1,
		},
		{
			Source:         api.EventSource{Component: "kubelet"},
			Message:        "Item 3",
			FirstTimestamp: util.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
			LastTimestamp:  util.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
			Count:          1,
		},
	})

	// Act
	sort.Sort(list)

	// Assert
	if list[0].Message != "Item 2" ||
		list[1].Message != "Item 3" ||
		list[2].Message != "Item 1" {
		t.Fatal("List is not sorted by time. List: ", list)
	}
}
Example #17
func TestDescribeContainers(t *testing.T) {
	testCases := []struct {
		container        api.Container
		status           api.ContainerStatus
		expectedElements []string
	}{
		// Running state.
		{
			container: api.Container{Name: "test", Image: "image"},
			status: api.ContainerStatus{
				Name: "test",
				State: api.ContainerState{
					Running: &api.ContainerStateRunning{
						StartedAt: util.NewTime(time.Now()),
					},
				},
				Ready:        true,
				RestartCount: 7,
			},
			expectedElements: []string{"test", "State", "Running", "Ready", "True", "Restart Count", "7", "Image", "image", "Started"},
		},
		// Waiting state.
		{
			container: api.Container{Name: "test", Image: "image"},
			status: api.ContainerStatus{
				Name: "test",
				State: api.ContainerState{
					Waiting: &api.ContainerStateWaiting{
						Reason: "potato",
					},
				},
				Ready:        true,
				RestartCount: 7,
			},
			expectedElements: []string{"test", "State", "Waiting", "Ready", "True", "Restart Count", "7", "Image", "image", "Reason", "potato"},
		},
		// Terminated state.
		{
			container: api.Container{Name: "test", Image: "image"},
			status: api.ContainerStatus{
				Name: "test",
				State: api.ContainerState{
					Terminated: &api.ContainerStateTerminated{
						StartedAt:  util.NewTime(time.Now()),
						FinishedAt: util.NewTime(time.Now()),
						Reason:     "potato",
						ExitCode:   2,
					},
				},
				Ready:        true,
				RestartCount: 7,
			},
			expectedElements: []string{"test", "State", "Terminated", "Ready", "True", "Restart Count", "7", "Image", "image", "Reason", "potato", "Started", "Finished", "Exit Code", "2"},
		},
		// Last Terminated
		{
			container: api.Container{Name: "test", Image: "image"},
			status: api.ContainerStatus{
				Name: "test",
				State: api.ContainerState{
					Running: &api.ContainerStateRunning{
						StartedAt: util.NewTime(time.Now()),
					},
				},
				LastTerminationState: api.ContainerState{
					Terminated: &api.ContainerStateTerminated{
						StartedAt:  util.NewTime(time.Now().Add(time.Second * 3)),
						FinishedAt: util.NewTime(time.Now()),
						Reason:     "crashing",
						ExitCode:   3,
					},
				},
				Ready:        true,
				RestartCount: 7,
			},
			expectedElements: []string{"test", "State", "Terminated", "Ready", "True", "Restart Count", "7", "Image", "image", "Started", "Finished", "Exit Code", "2", "crashing", "3"},
		},
		// No state defaults to waiting.
		{
			container: api.Container{Name: "test", Image: "image"},
			status: api.ContainerStatus{
				Name:         "test",
				Ready:        true,
				RestartCount: 7,
			},
			expectedElements: []string{"test", "State", "Waiting", "Ready", "True", "Restart Count", "7", "Image", "image"},
		},
		//env
		{
			container: api.Container{Name: "test", Image: "image", Env: []api.EnvVar{{Name: "envname", Value: "xyz"}}},
			status: api.ContainerStatus{
				Name:         "test",
				Ready:        true,
				RestartCount: 7,
			},
			expectedElements: []string{"test", "State", "Waiting", "Ready", "True", "Restart Count", "7", "Image", "image", "envname", "xyz"},
		},
		// Using limits.
		{
			container: api.Container{
				Name:  "test",
				Image: "image",
				Resources: api.ResourceRequirements{
					Limits: api.ResourceList{
						api.ResourceName(api.ResourceCPU):     resource.MustParse("1000"),
						api.ResourceName(api.ResourceMemory):  resource.MustParse("4G"),
						api.ResourceName(api.ResourceStorage): resource.MustParse("20G"),
					},
				},
			},
			status: api.ContainerStatus{
				Name:         "test",
				Ready:        true,
				RestartCount: 7,
			},
			expectedElements: []string{"cpu", "1k", "memory", "4G", "storage", "20G"},
		},
	}

	for i, testCase := range testCases {
		out := new(bytes.Buffer)
		pod := api.Pod{
			Spec: api.PodSpec{
				Containers: []api.Container{testCase.container},
			},
			Status: api.PodStatus{
				ContainerStatuses: []api.ContainerStatus{testCase.status},
			},
		}
		describeContainers(&pod, out)
		output := out.String()
		for _, expected := range testCase.expectedElements {
			if !strings.Contains(output, expected) {
				t.Errorf("Test case %d: expected to find %q in output: %q", i, expected, output)
			}
		}
	}
}
Example #18
func TestPruneTask(t *testing.T) {
	BuildPhaseOptions := []buildapi.BuildPhase{
		buildapi.BuildPhaseCancelled,
		buildapi.BuildPhaseComplete,
		buildapi.BuildPhaseError,
		buildapi.BuildPhaseFailed,
		buildapi.BuildPhaseNew,
		buildapi.BuildPhasePending,
		buildapi.BuildPhaseRunning,
	}
	BuildPhaseFilter := []buildapi.BuildPhase{
		buildapi.BuildPhaseCancelled,
		buildapi.BuildPhaseComplete,
		buildapi.BuildPhaseError,
		buildapi.BuildPhaseFailed,
	}
	BuildPhaseFilterSet := sets.String{}
	for _, BuildPhase := range BuildPhaseFilter {
		BuildPhaseFilterSet.Insert(string(BuildPhase))
	}

	for _, orphans := range []bool{true, false} {
		for _, BuildPhaseOption := range BuildPhaseOptions {
			keepYoungerThan := time.Hour

			now := util.Now()
			old := util.NewTime(now.Time.Add(-1 * keepYoungerThan))

			buildConfigs := []*buildapi.BuildConfig{}
			builds := []*buildapi.Build{}

			buildConfig := mockBuildConfig("a", "build-config")
			buildConfigs = append(buildConfigs, buildConfig)

			builds = append(builds, withCreated(withStatus(mockBuild("a", "build-1", buildConfig), BuildPhaseOption), now))
			builds = append(builds, withCreated(withStatus(mockBuild("a", "build-2", buildConfig), BuildPhaseOption), old))
			builds = append(builds, withCreated(withStatus(mockBuild("a", "orphan-build-1", nil), BuildPhaseOption), now))
			builds = append(builds, withCreated(withStatus(mockBuild("a", "orphan-build-2", nil), BuildPhaseOption), old))

			keepComplete := 1
			keepFailed := 1
			expectedValues := sets.String{}
			filter := &andFilter{
				filterPredicates: []FilterPredicate{NewFilterBeforePredicate(keepYoungerThan)},
			}
			dataSet := NewDataSet(buildConfigs, filter.Filter(builds))
			resolver := NewPerBuildConfigResolver(dataSet, keepComplete, keepFailed)
			if orphans {
				resolver = &mergeResolver{
					resolvers: []Resolver{resolver, NewOrphanBuildResolver(dataSet, BuildPhaseFilter)},
				}
			}
			expectedBuilds, err := resolver.Resolve()
			for _, build := range expectedBuilds {
				expectedValues.Insert(build.Name)
			}

			recorder := &mockPruneRecorder{set: sets.String{}}
			task := NewPruneTasker(buildConfigs, builds, keepYoungerThan, orphans, keepComplete, keepFailed, recorder.Handler)
			err = task.PruneTask()
			if err != nil {
				t.Errorf("Unexpected error %v", err)
			}
			recorder.Verify(t, expectedValues)
		}
	}

}
Example #19
func TestPruneTask(t *testing.T) {
	deploymentStatusOptions := []deployapi.DeploymentStatus{
		deployapi.DeploymentStatusComplete,
		deployapi.DeploymentStatusFailed,
		deployapi.DeploymentStatusNew,
		deployapi.DeploymentStatusPending,
		deployapi.DeploymentStatusRunning,
	}
	deploymentStatusFilter := []deployapi.DeploymentStatus{
		deployapi.DeploymentStatusComplete,
		deployapi.DeploymentStatusFailed,
	}
	deploymentStatusFilterSet := util.StringSet{}
	for _, deploymentStatus := range deploymentStatusFilter {
		deploymentStatusFilterSet.Insert(string(deploymentStatus))
	}

	for _, orphans := range []bool{true, false} {
		for _, deploymentStatusOption := range deploymentStatusOptions {
			keepYoungerThan := time.Hour

			now := util.Now()
			old := util.NewTime(now.Time.Add(-1 * keepYoungerThan))

			deploymentConfigs := []*deployapi.DeploymentConfig{}
			deployments := []*kapi.ReplicationController{}

			deploymentConfig := mockDeploymentConfig("a", "deployment-config")
			deploymentConfigs = append(deploymentConfigs, deploymentConfig)

			deployments = append(deployments, withCreated(withStatus(mockDeployment("a", "build-1", deploymentConfig), deploymentStatusOption), now))
			deployments = append(deployments, withCreated(withStatus(mockDeployment("a", "build-2", deploymentConfig), deploymentStatusOption), old))
			deployments = append(deployments, withSize(withCreated(withStatus(mockDeployment("a", "build-3-with-replicas", deploymentConfig), deploymentStatusOption), old), 4))
			deployments = append(deployments, withCreated(withStatus(mockDeployment("a", "orphan-build-1", nil), deploymentStatusOption), now))
			deployments = append(deployments, withCreated(withStatus(mockDeployment("a", "orphan-build-2", nil), deploymentStatusOption), old))
			deployments = append(deployments, withSize(withCreated(withStatus(mockDeployment("a", "orphan-build-3-with-replicas", nil), deploymentStatusOption), old), 4))

			keepComplete := 1
			keepFailed := 1
			expectedValues := util.StringSet{}
			filter := &andFilter{
				filterPredicates: []FilterPredicate{
					FilterDeploymentsPredicate,
					FilterZeroReplicaSize,
					NewFilterBeforePredicate(keepYoungerThan),
				},
			}
			dataSet := NewDataSet(deploymentConfigs, filter.Filter(deployments))
			resolver := NewPerDeploymentConfigResolver(dataSet, keepComplete, keepFailed)
			if orphans {
				resolver = &mergeResolver{
					resolvers: []Resolver{resolver, NewOrphanDeploymentResolver(dataSet, deploymentStatusFilter)},
				}
			}
			expectedDeployments, err := resolver.Resolve()
			for _, item := range expectedDeployments {
				expectedValues.Insert(item.Name)
			}

			recorder := &mockPruneRecorder{set: util.StringSet{}}
			task := NewPruneTasker(deploymentConfigs, deployments, keepYoungerThan, orphans, keepComplete, keepFailed, recorder.Handler)
			err = task.PruneTask()
			if err != nil {
				t.Errorf("Unexpected error %v", err)
			}
			recorder.Verify(t, expectedValues)
		}
	}

}
Example #20
					defer GinkgoRecover()

					if p.Status.Phase == api.PodRunning {
						if _, found := watchTimes[p.Name]; !found {
							watchTimes[p.Name] = util.Now()
							createTimes[p.Name] = p.CreationTimestamp
							nodes[p.Name] = p.Spec.NodeName
							var startTime util.Time
							for _, cs := range p.Status.ContainerStatuses {
								if cs.State.Running != nil {
									if startTime.Before(cs.State.Running.StartedAt) {
										startTime = cs.State.Running.StartedAt
									}
								}
							}
							if startTime != util.NewTime(time.Time{}) {
								runTimes[p.Name] = startTime
							} else {
								Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
							}
						}
					}
				}

				additionalPodsPrefix = "density-latency-pod-" + string(util.NewUUID())
				_, controller := framework.NewInformer(
					&cache.ListWatch{
						ListFunc: func() (runtime.Object, error) {
							return c.Pods(ns).List(labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix}), fields.Everything())
						},
						WatchFunc: func(rv string) (watch.Interface, error) {
Example #21
func (a *HorizontalController) reconcileAutoscaler(hpa experimental.HorizontalPodAutoscaler) error {
	reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Namespace, hpa.Spec.ScaleRef.Name)

	scale, err := a.client.Experimental().Scales(hpa.Spec.ScaleRef.Namespace).Get(hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Name)
	if err != nil {
		a.eventRecorder.Event(&hpa, "FailedGetScale", err.Error())
		return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
	}
	currentReplicas := scale.Status.Replicas
	currentConsumption, err := a.metricsClient.
		ResourceConsumption(hpa.Spec.ScaleRef.Namespace).
		Get(hpa.Spec.Target.Resource, scale.Status.Selector)

	// TODO: what to do on partial errors (like metrics obtained for 75% of pods).
	if err != nil {
		a.eventRecorder.Event(&hpa, "FailedGetMetrics", err.Error())
		return fmt.Errorf("failed to get metrics for %s: %v", reference, err)
	}

	usageRatio := float64(currentConsumption.Quantity.MilliValue()) / float64(hpa.Spec.Target.Quantity.MilliValue())
	desiredReplicas := int(math.Ceil(usageRatio * float64(currentReplicas)))

	if desiredReplicas < hpa.Spec.MinCount {
		desiredReplicas = hpa.Spec.MinCount
	}

	// TODO: remove when pod idling is done.
	if desiredReplicas == 0 {
		desiredReplicas = 1
	}

	if desiredReplicas > hpa.Spec.MaxCount {
		desiredReplicas = hpa.Spec.MaxCount
	}
	now := time.Now()
	rescale := false

	if desiredReplicas != currentReplicas {
		// Going down only if the usageRatio dropped significantly below the target
		// and there was no rescaling in the last downscaleForbiddenWindow.
		if desiredReplicas < currentReplicas && usageRatio < (1-tolerance) &&
			(hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil ||
				hpa.Status.LastScaleTimestamp.Add(downscaleForbiddenWindow).Before(now)) {
			rescale = true
		}

		// Going up only if the usage ratio increased significantly above the target
		// and there was no rescaling in the last upscaleForbiddenWindow.
		if desiredReplicas > currentReplicas && usageRatio > (1+tolerance) &&
			(hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil ||
				hpa.Status.LastScaleTimestamp.Add(upscaleForbiddenWindow).Before(now)) {
			rescale = true
		}
	}

	if rescale {
		scale.Spec.Replicas = desiredReplicas
		_, err = a.client.Experimental().Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale)
		if err != nil {
			a.eventRecorder.Eventf(&hpa, "FailedRescale", "New size: %d; error: %v", desiredReplicas, err.Error())
			return fmt.Errorf("failed to rescale %s: %v", reference, err)
		}
		a.eventRecorder.Eventf(&hpa, "SuccessfulRescale", "New size: %d", desiredReplicas)
	} else {
		desiredReplicas = currentReplicas
	}

	status := experimental.HorizontalPodAutoscalerStatus{
		CurrentReplicas:    currentReplicas,
		DesiredReplicas:    desiredReplicas,
		CurrentConsumption: currentConsumption,
	}
	hpa.Status = &status
	if rescale {
		now := util.NewTime(now)
		hpa.Status.LastScaleTimestamp = &now
	}

	_, err = a.client.Experimental().HorizontalPodAutoscalers(hpa.Namespace).Update(&hpa)
	if err != nil {
		a.eventRecorder.Event(&hpa, "FailedUpdateStatus", err.Error())
		return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
	}
	return nil
}
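The replica computation above reduces to ceil(usage ratio × current replicas), clamped to the configured bounds. A standalone sketch of just that arithmetic (parameter names and example values are assumptions, and the tolerance/forbidden-window checks are omitted):

package main

import (
	"fmt"
	"math"
)

// desiredReplicaCount mirrors the ceil-and-clamp step: scale the current
// replica count by the usage ratio, then keep it within [minCount, maxCount]
// and never let it drop to zero.
func desiredReplicaCount(currentReplicas int, consumptionMilli, targetMilli int64, minCount, maxCount int) int {
	usageRatio := float64(consumptionMilli) / float64(targetMilli)
	desired := int(math.Ceil(usageRatio * float64(currentReplicas)))
	if desired < minCount {
		desired = minCount
	}
	if desired == 0 {
		desired = 1
	}
	if desired > maxCount {
		desired = maxCount
	}
	return desired
}

func main() {
	// 3 replicas consuming 1500m against a 1000m target: ceil(1.5 * 3) = 5.
	fmt.Println(desiredReplicaCount(3, 1500, 1000, 1, 10)) // 5
}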
Example #22
func (a *HorizontalController) reconcileAutoscalers() error {
	ns := api.NamespaceAll
	list, err := a.client.Experimental().HorizontalPodAutoscalers(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return fmt.Errorf("error listing nodes: %v", err)
	}
	for _, hpa := range list.Items {
		reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Namespace, hpa.Spec.ScaleRef.Name)

		scale, err := a.client.Experimental().Scales(hpa.Spec.ScaleRef.Namespace).Get(hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Name)
		if err != nil {
			glog.Warningf("Failed to query scale subresource for %s: %v", reference, err)
			continue
		}
		currentReplicas := scale.Status.Replicas
		currentConsumption, err := a.metricsClient.ResourceConsumption(hpa.Spec.ScaleRef.Namespace).Get(hpa.Spec.Target.Resource,
			scale.Status.Selector)

		// TODO: what to do on partial errors (like metrics obtained for 75% of pods).
		if err != nil {
			glog.Warningf("Error while getting metrics for %s: %v", reference, err)
			continue
		}

		usageRatio := float64(currentConsumption.Quantity.MilliValue()) / float64(hpa.Spec.Target.Quantity.MilliValue())
		desiredReplicas := int(math.Ceil(usageRatio * float64(currentReplicas)))

		if desiredReplicas < hpa.Spec.MinCount {
			desiredReplicas = hpa.Spec.MinCount
		}

		// TODO: remove when pod idling is done.
		if desiredReplicas == 0 {
			desiredReplicas = 1
		}

		if desiredReplicas > hpa.Spec.MaxCount {
			desiredReplicas = hpa.Spec.MaxCount
		}
		now := time.Now()
		rescale := false

		if desiredReplicas != currentReplicas {
			// Going down only if the usageRatio dropped significantly below the target
			// and there was no rescaling in the last downscaleForbiddenWindow.
			if desiredReplicas < currentReplicas && usageRatio < (1-tolerance) &&
				(hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil ||
					hpa.Status.LastScaleTimestamp.Add(downscaleForbiddenWindow).Before(now)) {
				rescale = true
			}

			// Going up only if the usage ratio increased significantly above the target
			// and there was no rescaling in the last upscaleForbiddenWindow.
			if desiredReplicas > currentReplicas && usageRatio > (1+tolerance) &&
				(hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil ||
					hpa.Status.LastScaleTimestamp.Add(upscaleForbiddenWindow).Before(now)) {
				rescale = true
			}
		}

		if rescale {
			scale.Spec.Replicas = desiredReplicas
			_, err = a.client.Experimental().Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale)
			if err != nil {
				glog.Warningf("Failed to rescale %s: %v", reference, err)
				continue
			}
		} else {
			desiredReplicas = currentReplicas
		}

		status := experimental.HorizontalPodAutoscalerStatus{
			CurrentReplicas:    currentReplicas,
			DesiredReplicas:    desiredReplicas,
			CurrentConsumption: currentConsumption,
		}
		hpa.Status = &status
		if rescale {
			now := util.NewTime(now)
			hpa.Status.LastScaleTimestamp = &now
		}

		_, err = a.client.Experimental().HorizontalPodAutoscalers(hpa.Namespace).Update(&hpa)
		if err != nil {
			glog.Warningf("Failed to update HorizontalPodAutoscaler %s: %v", hpa.Name, err)
			continue
		}
	}
	return nil
}
Example #23
func TestPrintHumanReadableWithNamespace(t *testing.T) {
	namespaceName := "testnamespace"
	name := "test"
	table := []struct {
		obj          runtime.Object
		isNamespaced bool
	}{
		{
			obj: &api.Pod{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
			},
			isNamespaced: true,
		},
		{
			obj: &api.ReplicationController{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
				Spec: api.ReplicationControllerSpec{
					Replicas: 2,
					Template: &api.PodTemplateSpec{
						ObjectMeta: api.ObjectMeta{
							Labels: map[string]string{
								"name": "foo",
								"type": "production",
							},
						},
						Spec: api.PodSpec{
							Containers: []api.Container{
								{
									Image: "foo/bar",
									TerminationMessagePath: api.TerminationMessagePathDefault,
									ImagePullPolicy:        api.PullIfNotPresent,
								},
							},
							RestartPolicy: api.RestartPolicyAlways,
							DNSPolicy:     api.DNSDefault,
							NodeSelector: map[string]string{
								"baz": "blah",
							},
						},
					},
				},
			},
			isNamespaced: true,
		},
		{
			obj: &api.Service{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
				Spec: api.ServiceSpec{
					ClusterIP: "1.2.3.4",
					Ports: []api.ServicePort{
						{
							Port:     80,
							Protocol: "TCP",
						},
					},
				},
				Status: api.ServiceStatus{
					LoadBalancer: api.LoadBalancerStatus{
						Ingress: []api.LoadBalancerIngress{
							{
								IP: "2.3.4.5",
							},
						},
					},
				},
			},
			isNamespaced: true,
		},
		{
			obj: &api.Endpoints{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
				Subsets: []api.EndpointSubset{{
					Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}, {IP: "localhost"}},
					Ports:     []api.EndpointPort{{Port: 8080}},
				},
				}},
			isNamespaced: true,
		},
		{
			obj: &api.Namespace{
				ObjectMeta: api.ObjectMeta{Name: name},
			},
			isNamespaced: false,
		},
		{
			obj: &api.Secret{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
			},
			isNamespaced: true,
		},
		{
			obj: &api.ServiceAccount{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
				Secrets:    []api.ObjectReference{},
			},
			isNamespaced: true,
		},
		{
			obj: &api.Node{
				ObjectMeta: api.ObjectMeta{Name: name},
				Status:     api.NodeStatus{},
			},
			isNamespaced: false,
		},
		{
			obj: &api.PersistentVolume{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
				Spec:       api.PersistentVolumeSpec{},
			},
			isNamespaced: false,
		},
		{
			obj: &api.PersistentVolumeClaim{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
				Spec:       api.PersistentVolumeClaimSpec{},
			},
			isNamespaced: true,
		},
		{
			obj: &api.Event{
				ObjectMeta:     api.ObjectMeta{Name: name, Namespace: namespaceName},
				Source:         api.EventSource{Component: "kubelet"},
				Message:        "Item 1",
				FirstTimestamp: util.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
				LastTimestamp:  util.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
				Count:          1,
			},
			isNamespaced: true,
		},
		{
			obj: &api.LimitRange{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
			},
			isNamespaced: true,
		},
		{
			obj: &api.ResourceQuota{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
			},
			isNamespaced: true,
		},
		{
			obj: &api.ComponentStatus{
				Conditions: []api.ComponentCondition{
					{Type: api.ComponentHealthy, Status: api.ConditionTrue, Message: "ok", Error: ""},
				},
			},
			isNamespaced: false,
		},
	}

	for _, test := range table {
		if test.isNamespaced {
			// Expect output to include namespace when requested.
			printer := NewHumanReadablePrinter(false, true, false, []string{})
			buffer := &bytes.Buffer{}
			err := printer.PrintObj(test.obj, buffer)
			if err != nil {
				t.Fatalf("An error occurred printing object: %#v", err)
			}
			matched := contains(strings.Fields(buffer.String()), fmt.Sprintf("%s", namespaceName))
			if !matched {
				t.Errorf("Expect printing object to contain namespace: %#v", test.obj)
			}
		} else {
			// Expect error when trying to get all namespaces for un-namespaced object.
			printer := NewHumanReadablePrinter(false, true, false, []string{})
			buffer := &bytes.Buffer{}
			err := printer.PrintObj(test.obj, buffer)
			if err == nil {
				t.Errorf("Expected error when printing un-namespaced type")
			}
		}
	}
}
Example #24
func TestPerDeploymentConfigResolver(t *testing.T) {
	deploymentStatusOptions := []deployapi.DeploymentStatus{
		deployapi.DeploymentStatusComplete,
		deployapi.DeploymentStatusFailed,
		deployapi.DeploymentStatusNew,
		deployapi.DeploymentStatusPending,
		deployapi.DeploymentStatusRunning,
	}
	deploymentConfigs := []*deployapi.DeploymentConfig{
		mockDeploymentConfig("a", "deployment-config-1"),
		mockDeploymentConfig("b", "deployment-config-2"),
	}
	deploymentsPerStatus := 100
	deployments := []*kapi.ReplicationController{}
	for _, deploymentConfig := range deploymentConfigs {
		for _, deploymentStatusOption := range deploymentStatusOptions {
			for i := 0; i < deploymentsPerStatus; i++ {
				deployment := withStatus(mockDeployment(deploymentConfig.Namespace, fmt.Sprintf("%v-%v-%v", deploymentConfig.Name, deploymentStatusOption, i), deploymentConfig), deploymentStatusOption)
				deployments = append(deployments, deployment)
			}
		}
	}

	now := util.Now()
	for i := range deployments {
		creationTimestamp := util.NewTime(now.Time.Add(-1 * time.Duration(i) * time.Hour))
		deployments[i].CreationTimestamp = creationTimestamp
	}

	// test number to keep at varying ranges
	for keep := 0; keep < deploymentsPerStatus*2; keep++ {
		dataSet := NewDataSet(deploymentConfigs, deployments)

		expectedNames := sets.String{}
		deploymentCompleteStatusFilterSet := sets.NewString(string(deployapi.DeploymentStatusComplete))
		deploymentFailedStatusFilterSet := sets.NewString(string(deployapi.DeploymentStatusFailed))

		for _, deploymentConfig := range deploymentConfigs {
			deploymentItems, err := dataSet.ListDeploymentsByDeploymentConfig(deploymentConfig)
			if err != nil {
				t.Errorf("Unexpected err %v", err)
			}
			completedDeployments, failedDeployments := []*kapi.ReplicationController{}, []*kapi.ReplicationController{}
			for _, deployment := range deploymentItems {
				status := deployment.Annotations[deployapi.DeploymentStatusAnnotation]
				if deploymentCompleteStatusFilterSet.Has(status) {
					completedDeployments = append(completedDeployments, deployment)
				} else if deploymentFailedStatusFilterSet.Has(status) {
					failedDeployments = append(failedDeployments, deployment)
				}
			}
			sort.Sort(sortableReplicationControllers(completedDeployments))
			sort.Sort(sortableReplicationControllers(failedDeployments))
			purgeCompleted := []*kapi.ReplicationController{}
			purgeFailed := []*kapi.ReplicationController{}
			if keep >= 0 && keep < len(completedDeployments) {
				purgeCompleted = completedDeployments[keep:]
			}
			if keep >= 0 && keep < len(failedDeployments) {
				purgeFailed = failedDeployments[keep:]
			}
			for _, deployment := range purgeCompleted {
				expectedNames.Insert(deployment.Name)
			}
			for _, deployment := range purgeFailed {
				expectedNames.Insert(deployment.Name)
			}
		}

		resolver := NewPerDeploymentConfigResolver(dataSet, keep, keep)
		results, err := resolver.Resolve()
		if err != nil {
			t.Errorf("Unexpected error %v", err)
		}
		foundNames := sets.String{}
		for _, result := range results {
			foundNames.Insert(result.Name)
		}
		if len(foundNames) != len(expectedNames) || !expectedNames.HasAll(foundNames.List()...) {
			expectedValues := expectedNames.List()
			actualValues := foundNames.List()
			sort.Strings(expectedValues)
			sort.Strings(actualValues)
			t.Errorf("keep %v\n, expected \n\t%v\n, actual \n\t%v\n", keep, expectedValues, actualValues)
		}
	}
}
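The per-config resolvers keep the most recent completed and failed items and mark the rest for pruning; the test above rebuilds that expectation by slicing each sorted list at the keep index. A tiny standalone sketch of that keep-N slicing (the names below are illustrative):

package main

import "fmt"

// prunable returns the items past the keep index of a list that is already
// sorted newest-first; if keep covers the whole list, nothing is pruned.
func prunable(sortedNewestFirst []string, keep int) []string {
	if keep < 0 || keep >= len(sortedNewestFirst) {
		return nil
	}
	return sortedNewestFirst[keep:]
}

func main() {
	names := []string{"deploy-5", "deploy-4", "deploy-3", "deploy-2", "deploy-1"}
	fmt.Println(prunable(names, 2)) // [deploy-3 deploy-2 deploy-1]
}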
Example #25
func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error {
	ns := api.NamespaceAll
	list, err := a.expClient.HorizontalPodAutoscalers(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return fmt.Errorf("error listing nodes: %v", err)
	}
	for _, hpa := range list.Items {
		reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Namespace, hpa.Spec.ScaleRef.Name)

		scale, err := a.expClient.Scales(hpa.Spec.ScaleRef.Namespace).Get(hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Name)
		if err != nil {
			glog.Warningf("Failed to query scale subresource for %s: %v", reference, err)
			continue
		}
		podList, err := a.client.Pods(hpa.Spec.ScaleRef.Namespace).
			List(labels.SelectorFromSet(labels.Set(scale.Status.Selector)), fields.Everything())

		if err != nil {
			glog.Warningf("Failed to get pod list for %s: %v", reference, err)
			continue
		}
		podNames := []string{}
		for _, pod := range podList.Items {
			podNames = append(podNames, pod.Name)
		}

		metricSpec, metricDefined := resourceDefinitions[hpa.Spec.Target.Resource]
		if !metricDefined {
			glog.Warningf("Heapster metric not defined for %s %v", reference, hpa.Spec.Target.Resource)
			continue
		}
		now := time.Now()

		startTime := now.Add(heapsterQueryStart)
		metricPath := fmt.Sprintf("/api/v1/model/namespaces/%s/pod-list/%s/metrics/%s",
			hpa.Spec.ScaleRef.Namespace,
			strings.Join(podNames, ","),
			metricSpec.name)

		resultRaw, err := a.client.Services(heapsterNamespace).
			ProxyGet(heapsterService, metricPath, map[string]string{"start": startTime.Format(time.RFC3339)}).
			DoRaw()

		if err != nil {
			glog.Warningf("Failed to get pods metrics for %s: %v", reference, err)
			continue
		}

		var metrics heapster.MetricResultList
		err = json.Unmarshal(resultRaw, &metrics)
		if err != nil {
			glog.Warningf("Failed to unmarshall heapster response: %v", err)
			continue
		}

		glog.Infof("Metrics available for %s: %s", reference, string(resultRaw))

		currentConsumption, count := metricSpec.aggregator(metrics)
		if count != len(podList.Items) {
			glog.Warningf("Metrics obtained for %d/%d of pods", count, len(podList.Items))
			continue
		}

		// if the ratio is 1.2 we want to have 2 replicas
		desiredReplicas := 1 + int((currentConsumption.Quantity.MilliValue()*int64(count))/hpa.Spec.Target.Quantity.MilliValue())

		if desiredReplicas < hpa.Spec.MinCount {
			desiredReplicas = hpa.Spec.MinCount
		}
		if desiredReplicas > hpa.Spec.MaxCount {
			desiredReplicas = hpa.Spec.MaxCount
		}

		rescale := false

		if desiredReplicas != count {
			// Going down
			if desiredReplicas < count && (hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil ||
				hpa.Status.LastScaleTimestamp.Add(downscaleForbiddenWindow).Before(now)) {
				rescale = true
			}

			// Going up
			if desiredReplicas > count && (hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil ||
				hpa.Status.LastScaleTimestamp.Add(upscaleForbiddenWindow).Before(now)) {
				rescale = true
			}

			if rescale {
				scale.Spec.Replicas = desiredReplicas
				_, err = a.expClient.Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale)
				if err != nil {
					glog.Warningf("Failed to rescale %s: %v", reference, err)
					continue
				}
			}
		}

		status := expapi.HorizontalPodAutoscalerStatus{
			CurrentReplicas:    count,
			DesiredReplicas:    desiredReplicas,
			CurrentConsumption: &currentConsumption,
		}
		hpa.Status = &status
		if rescale {
			now := util.NewTime(now)
			hpa.Status.LastScaleTimestamp = &now
		}

		_, err = a.expClient.HorizontalPodAutoscalers(hpa.Namespace).Update(&hpa)
		if err != nil {
			glog.Warningf("Failed to update HorizontalPodAutoscaler %s: %v", hpa.Name, err)
			continue
		}
	}
	return nil
}
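The "if the ratio is 1.2 we want to have 2 replicas" comment above corresponds to desired = 1 + floor(total consumption / target). A standalone sketch of that integer arithmetic (whether the consumption figure is per pod or aggregate, and the example values, are assumptions for illustration):

package main

import "fmt"

// desiredReplicas applies the 1 + floor(total/target) rule and clamps the
// result to the configured bounds.
func desiredReplicas(perPodMilli, podCount, targetMilli, minCount, maxCount int64) int64 {
	d := 1 + (perPodMilli*podCount)/targetMilli
	if d < minCount {
		d = minCount
	}
	if d > maxCount {
		d = maxCount
	}
	return d
}

func main() {
	// 10 pods each consuming 120m against a 1000m target: ratio 1.2 -> 2 replicas.
	fmt.Println(desiredReplicas(120, 10, 1000, 1, 10)) // 2
}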
Example #26
func TestPerBuildConfigResolver(t *testing.T) {
	BuildPhaseOptions := []buildapi.BuildPhase{
		buildapi.BuildPhaseCancelled,
		buildapi.BuildPhaseComplete,
		buildapi.BuildPhaseError,
		buildapi.BuildPhaseFailed,
		buildapi.BuildPhaseNew,
		buildapi.BuildPhasePending,
		buildapi.BuildPhaseRunning,
	}
	buildConfigs := []*buildapi.BuildConfig{
		mockBuildConfig("a", "build-config-1"),
		mockBuildConfig("b", "build-config-2"),
	}
	buildsPerStatus := 100
	builds := []*buildapi.Build{}
	for _, buildConfig := range buildConfigs {
		for _, BuildPhaseOption := range BuildPhaseOptions {
			for i := 0; i < buildsPerStatus; i++ {
				build := withStatus(mockBuild(buildConfig.Namespace, fmt.Sprintf("%v-%v-%v", buildConfig.Name, BuildPhaseOption, i), buildConfig), BuildPhaseOption)
				builds = append(builds, build)
			}
		}
	}

	now := util.Now()
	for i := range builds {
		creationTimestamp := util.NewTime(now.Time.Add(-1 * time.Duration(i) * time.Hour))
		builds[i].CreationTimestamp = creationTimestamp
	}

	// test number to keep at varying ranges
	for keep := 0; keep < buildsPerStatus*2; keep++ {
		dataSet := NewDataSet(buildConfigs, builds)

		expectedNames := util.StringSet{}
		buildCompleteStatusFilterSet := util.NewStringSet(string(buildapi.BuildPhaseComplete))
		buildFailedStatusFilterSet := util.NewStringSet(string(buildapi.BuildPhaseCancelled), string(buildapi.BuildPhaseError), string(buildapi.BuildPhaseFailed))

		for _, buildConfig := range buildConfigs {
			buildItems, err := dataSet.ListBuildsByBuildConfig(buildConfig)
			if err != nil {
				t.Errorf("Unexpected err %v", err)
			}
			completedBuilds, failedBuilds := []*buildapi.Build{}, []*buildapi.Build{}
			for _, build := range buildItems {
				if buildCompleteStatusFilterSet.Has(string(build.Status.Phase)) {
					completedBuilds = append(completedBuilds, build)
				} else if buildFailedStatusFilterSet.Has(string(build.Status.Phase)) {
					failedBuilds = append(failedBuilds, build)
				}
			}
			sort.Sort(sortableBuilds(completedBuilds))
			sort.Sort(sortableBuilds(failedBuilds))
			purgeCompleted := []*buildapi.Build{}
			purgeFailed := []*buildapi.Build{}
			if keep >= 0 && keep < len(completedBuilds) {
				purgeCompleted = completedBuilds[keep:]
			}
			if keep >= 0 && keep < len(failedBuilds) {
				purgeFailed = failedBuilds[keep:]
			}
			for _, build := range purgeCompleted {
				expectedNames.Insert(build.Name)
			}
			for _, build := range purgeFailed {
				expectedNames.Insert(build.Name)
			}
		}

		resolver := NewPerBuildConfigResolver(dataSet, keep, keep)
		results, err := resolver.Resolve()
		if err != nil {
			t.Errorf("Unexpected error %v", err)
		}
		foundNames := util.StringSet{}
		for _, result := range results {
			foundNames.Insert(result.Name)
		}
		if len(foundNames) != len(expectedNames) || !expectedNames.HasAll(foundNames.List()...) {
			expectedValues := expectedNames.List()
			actualValues := foundNames.List()
			sort.Strings(expectedValues)
			sort.Strings(actualValues)
			t.Errorf("keep %v\n, expected \n\t%v\n, actual \n\t%v\n", keep, expectedValues, actualValues)
		}
	}
}
Example #27
func TestGraph(t *testing.T) {
	g := osgraph.New()
	now := time.Now()
	builds := []buildapi.Build{
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "build1-1-abc",
				Labels:            map[string]string{buildapi.BuildConfigLabel: "build1"},
				CreationTimestamp: util.NewTime(now.Add(-10 * time.Second)),
			},
			Status: buildapi.BuildStatus{
				Phase: buildapi.BuildPhaseFailed,
			},
		},
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "build1-2-abc",
				Labels:            map[string]string{buildapi.BuildConfigLabel: "build1"},
				CreationTimestamp: util.NewTime(now.Add(-5 * time.Second)),
			},
			Status: buildapi.BuildStatus{
				Phase: buildapi.BuildPhaseComplete,
			},
		},
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "build1-3-abc",
				Labels:            map[string]string{buildapi.BuildConfigLabel: "build1"},
				CreationTimestamp: util.NewTime(now.Add(-15 * time.Second)),
			},
			Status: buildapi.BuildStatus{
				Phase: buildapi.BuildPhasePending,
			},
		},
	}
	for i := range builds {
		buildgraph.EnsureBuildNode(g, &builds[i])
	}

	buildgraph.EnsureBuildConfigNode(g, &buildapi.BuildConfig{
		ObjectMeta: kapi.ObjectMeta{Namespace: "default", Name: "build1"},
		Spec: buildapi.BuildConfigSpec{
			Triggers: []buildapi.BuildTriggerPolicy{
				{
					ImageChange: &buildapi.ImageChangeTrigger{},
				},
			},
			BuildSpec: buildapi.BuildSpec{
				Strategy: buildapi.BuildStrategy{
					Type: buildapi.SourceBuildStrategyType,
					SourceStrategy: &buildapi.SourceBuildStrategy{
						From: kapi.ObjectReference{Kind: "ImageStreamTag", Name: "test:base-image"},
					},
				},
				Output: buildapi.BuildOutput{
					To: &kapi.ObjectReference{Kind: "ImageStreamTag", Name: "other:tag1"},
				},
			},
		},
	})
	bcTestNode := buildgraph.EnsureBuildConfigNode(g, &buildapi.BuildConfig{
		ObjectMeta: kapi.ObjectMeta{Namespace: "default", Name: "test"},
		Spec: buildapi.BuildConfigSpec{
			BuildSpec: buildapi.BuildSpec{
				Output: buildapi.BuildOutput{
					To: &kapi.ObjectReference{Kind: "ImageStreamTag", Name: "other:base-image"},
				},
			},
		},
	})
	buildgraph.EnsureBuildConfigNode(g, &buildapi.BuildConfig{
		ObjectMeta: kapi.ObjectMeta{Namespace: "default", Name: "build2"},
		Spec: buildapi.BuildConfigSpec{
			BuildSpec: buildapi.BuildSpec{
				Output: buildapi.BuildOutput{
					To: &kapi.ObjectReference{Kind: "DockerImage", Name: "mycustom/repo/image:tag2"},
				},
			},
		},
	})
	kubegraph.EnsureServiceNode(g, &kapi.Service{
		ObjectMeta: kapi.ObjectMeta{Namespace: "default", Name: "svc-is-ignored"},
		Spec: kapi.ServiceSpec{
			Selector: nil,
		},
	})
	kubegraph.EnsureServiceNode(g, &kapi.Service{
		ObjectMeta: kapi.ObjectMeta{Namespace: "default", Name: "svc1"},
		Spec: kapi.ServiceSpec{
			Selector: map[string]string{
				"deploymentconfig": "deploy1",
			},
		},
	})
	kubegraph.EnsureServiceNode(g, &kapi.Service{
		ObjectMeta: kapi.ObjectMeta{Namespace: "default", Name: "svc2"},
		Spec: kapi.ServiceSpec{
			Selector: map[string]string{
				"deploymentconfig": "deploy1",
				"env":              "prod",
			},
		},
	})
	deploygraph.EnsureDeploymentConfigNode(g, &deployapi.DeploymentConfig{
		ObjectMeta: kapi.ObjectMeta{Namespace: "other", Name: "deploy1"},
		Triggers: []deployapi.DeploymentTriggerPolicy{
			{
				ImageChangeParams: &deployapi.DeploymentTriggerImageChangeParams{
					From:           kapi.ObjectReference{Namespace: "default", Name: "other"},
					ContainerNames: []string{"1", "2"},
					Tag:            "tag1",
				},
			},
		},
		Template: deployapi.DeploymentTemplate{
			ControllerTemplate: kapi.ReplicationControllerSpec{
				Template: &kapi.PodTemplateSpec{
					ObjectMeta: kapi.ObjectMeta{
						Labels: map[string]string{
							"deploymentconfig": "deploy1",
							"env":              "prod",
						},
					},
					Spec: kapi.PodSpec{
						Containers: []kapi.Container{
							{
								Name:  "1",
								Image: "mycustom/repo/image",
							},
							{
								Name:  "2",
								Image: "mycustom/repo/image2",
							},
							{
								Name:  "3",
								Image: "mycustom/repo/image3",
							},
						},
					},
				},
			},
		},
	})
	deploygraph.EnsureDeploymentConfigNode(g, &deployapi.DeploymentConfig{
		ObjectMeta: kapi.ObjectMeta{Namespace: "default", Name: "deploy2"},
		Template: deployapi.DeploymentTemplate{
			ControllerTemplate: kapi.ReplicationControllerSpec{
				Template: &kapi.PodTemplateSpec{
					ObjectMeta: kapi.ObjectMeta{
						Labels: map[string]string{
							"deploymentconfig": "deploy2",
							"env":              "dev",
						},
					},
					Spec: kapi.PodSpec{
						Containers: []kapi.Container{
							{
								Name:  "1",
								Image: "someother/image:v1",
							},
						},
					},
				},
			},
		},
	})

	kubeedges.AddAllExposedPodTemplateSpecEdges(g)
	buildedges.AddAllInputOutputEdges(g)
	buildedges.AddAllBuildEdges(g)
	deployedges.AddAllTriggerEdges(g)
	deployedges.AddAllDeploymentEdges(g)

	t.Log(g)

	for _, edge := range g.Edges() {
		if g.EdgeKinds(edge).Has(osgraph.UnknownEdgeKind) {
			t.Errorf("edge reported unknown kind: %#v", edge)
		}
	}

	// imagestreamtag default/other:base-image
	istID := 0
	for _, node := range g.Nodes() {
		if g.Name(node) == "ImageStreamTag|default/other:base-image" {
			istID = node.ID()
			break
		}
	}

	edge := g.Edge(concrete.Node(bcTestNode.ID()), concrete.Node(istID))
	if edge == nil {
		t.Fatalf("failed to find edge between %d and %d", bcTestNode.ID(), istID)
	}
	if len(g.SubgraphWithNodes([]graph.Node{edge.From(), edge.To()}, osgraph.ExistingDirectEdge).Edges()) != 1 {
		t.Fatalf("expected one edge")
	}
	if len(g.SubgraphWithNodes([]graph.Node{edge.To(), edge.From()}, osgraph.ExistingDirectEdge).Edges()) != 1 {
		t.Fatalf("expected one edge")
	}

	if e := g.Edge(concrete.Node(bcTestNode.ID()), concrete.Node(istID)); e == nil {
		t.Errorf("expected edge for %d-%d", bcTestNode.ID(), istID)
	}

	coveredNodes := IntSet{}

	serviceGroups, coveredByServiceGroups := AllServiceGroups(g, coveredNodes)
	coveredNodes.Insert(coveredByServiceGroups.List()...)

	bareDCPipelines, coveredByDCs := AllDeploymentConfigPipelines(g, coveredNodes)
	coveredNodes.Insert(coveredByDCs.List()...)

	if len(bareDCPipelines) != 1 {
		t.Fatalf("unexpected pipelines: %#v", bareDCPipelines)
	}
	if len(coveredNodes) != 10 {
		t.Fatalf("unexpected covered nodes: %#v", coveredNodes)
	}

	for _, bareDCPipeline := range bareDCPipelines {
		t.Logf("from %s", bareDCPipeline.Deployment.Name)
		for _, path := range bareDCPipeline.Images {
			t.Logf("  %v", path)
		}
	}

	if len(serviceGroups) != 3 {
		t.Errorf("unexpected service groups: %#v", serviceGroups)
	}
	for _, serviceGroup := range serviceGroups {
		t.Logf("service %s", serviceGroup.Service.Name)
		indent := "  "

		for _, deployment := range serviceGroup.DeploymentConfigPipelines {
			t.Logf("%sdeployment %s", indent, deployment.Deployment.Name)
			for _, image := range deployment.Images {
				t.Logf("%s  image %s", indent, image.Image.ImageSpec())
				if image.Build != nil {
					if image.LastSuccessfulBuild != nil {
						t.Logf("%s    built at %s", indent, image.LastSuccessfulBuild.Build.CreationTimestamp)
					} else if image.LastUnsuccessfulBuild != nil {
						t.Logf("%s    build %s at %s", indent, image.LastUnsuccessfulBuild.Build.Status, image.LastUnsuccessfulBuild.Build.CreationTimestamp)
					}
					for _, b := range image.ActiveBuilds {
						t.Logf("%s    build %s %s", indent, b.Build.Name, b.Build.Status)
					}
				}
			}
		}
	}
}
Example #28
func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error {
	ns := api.NamespaceAll
	list, err := a.expClient.HorizontalPodAutoscalers(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return fmt.Errorf("error listing nodes: %v", err)
	}
	for _, hpa := range list.Items {
		reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Namespace, hpa.Spec.ScaleRef.Name)

		scale, err := a.expClient.Scales(hpa.Spec.ScaleRef.Namespace).Get(hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Name)
		if err != nil {
			glog.Warningf("Failed to query scale subresource for %s: %v", reference, err)
			continue
		}
		currentReplicas := scale.Status.Replicas
		currentConsumption, err := a.metricsClient.ResourceConsumption(hpa.Spec.ScaleRef.Namespace).Get(hpa.Spec.Target.Resource,
			scale.Status.Selector)

		// TODO: what to do on partial errors (like metrics obtained for 75% of pods).
		if err != nil {
			glog.Warningf("Error while getting metrics for %s: %v", reference, err)
			continue
		}

		// if the ratio is 1.2 we want to have 2 replicas
		desiredReplicas := 1 + int((currentConsumption.Quantity.MilliValue()*int64(currentReplicas))/hpa.Spec.Target.Quantity.MilliValue())

		if desiredReplicas < hpa.Spec.MinCount {
			desiredReplicas = hpa.Spec.MinCount
		}
		if desiredReplicas > hpa.Spec.MaxCount {
			desiredReplicas = hpa.Spec.MaxCount
		}
		now := time.Now()
		rescale := false
		if desiredReplicas != currentReplicas {
			// Going down
			if desiredReplicas < currentReplicas && (hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil ||
				hpa.Status.LastScaleTimestamp.Add(downscaleForbiddenWindow).Before(now)) {
				rescale = true
			}

			// Going up
			if desiredReplicas > currentReplicas && (hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil ||
				hpa.Status.LastScaleTimestamp.Add(upscaleForbiddenWindow).Before(now)) {
				rescale = true
			}

			if rescale {
				scale.Spec.Replicas = desiredReplicas
				_, err = a.expClient.Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale)
				if err != nil {
					glog.Warningf("Failed to rescale %s: %v", reference, err)
					continue
				}
			}
		}

		status := expapi.HorizontalPodAutoscalerStatus{
			CurrentReplicas:    currentReplicas,
			DesiredReplicas:    desiredReplicas,
			CurrentConsumption: currentConsumption,
		}
		hpa.Status = &status
		if rescale {
			now := util.NewTime(now)
			hpa.Status.LastScaleTimestamp = &now
		}

		_, err = a.expClient.HorizontalPodAutoscalers(hpa.Namespace).Update(&hpa)
		if err != nil {
			glog.Warningf("Failed to update HorizontalPodAutoscaler %s: %v", hpa.Name, err)
			continue
		}
	}
	return nil
}