Example #1
func ReaperFor(kind unversioned.GroupKind, c client.Interface) (Reaper, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return &ReplicationControllerReaper{c, Interval, Timeout}, nil

	case extensions.Kind("ReplicaSet"):
		return &ReplicaSetReaper{c, Interval, Timeout}, nil

	case extensions.Kind("DaemonSet"):
		return &DaemonSetReaper{c, Interval, Timeout}, nil

	case api.Kind("Pod"):
		return &PodReaper{c}, nil

	case api.Kind("Service"):
		return &ServiceReaper{c}, nil

	case extensions.Kind("Job"), batch.Kind("Job"):
		return &JobReaper{c, Interval, Timeout}, nil

	case extensions.Kind("Deployment"):
		return &DeploymentReaper{c, Interval, Timeout}, nil

	}
	return nil, &NoSuchReaperError{kind}
}
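A minimal usage sketch of the factory above, assuming a configured client.Interface named kubeClient and the Stop signature shown in Example #6; the namespace and job name here are illustrative:

	reaper, err := ReaperFor(batch.Kind("Job"), kubeClient)
	if err != nil {
		// e.g. a *NoSuchReaperError for kinds without a registered reaper
		return err
	}
	// For a Job this scales to zero, removes the dead pods, then deletes the Job.
	if err := reaper.Stop("default", "my-job", 5*time.Minute, nil); err != nil {
		return err
	}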
Example #2
func ReaperFor(kind schema.GroupKind, c internalclientset.Interface) (Reaper, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return &ReplicationControllerReaper{c.Core(), Interval, Timeout}, nil

	case extensions.Kind("ReplicaSet"):
		return &ReplicaSetReaper{c.Extensions(), Interval, Timeout}, nil

	case extensions.Kind("DaemonSet"):
		return &DaemonSetReaper{c.Extensions(), Interval, Timeout}, nil

	case api.Kind("Pod"):
		return &PodReaper{c.Core()}, nil

	case api.Kind("Service"):
		return &ServiceReaper{c.Core()}, nil

	case extensions.Kind("Job"), batch.Kind("Job"):
		return &JobReaper{c.Batch(), c.Core(), Interval, Timeout}, nil

	case apps.Kind("StatefulSet"):
		return &StatefulSetReaper{c.Apps(), c.Core(), Interval, Timeout}, nil

	case extensions.Kind("Deployment"):
		return &DeploymentReaper{c.Extensions(), c.Extensions(), Interval, Timeout}, nil

	}
	return nil, &NoSuchReaperError{kind}
}
Example #3
func generateConfigsForGroup(
	nss []*v1.Namespace,
	groupName string,
	size, count int,
	image string,
	command []string,
	kind schema.GroupKind,
	secretsPerPod int,
) ([]testutils.RunObjectConfig, []*testutils.SecretConfig) {
	configs := make([]testutils.RunObjectConfig, 0, count)
	secretConfigs := make([]*testutils.SecretConfig, 0, count*secretsPerPod)
	for i := 1; i <= count; i++ {
		namespace := nss[i%len(nss)].Name
		secretNames := make([]string, 0, secretsPerPod)

		for j := 0; j < secretsPerPod; j++ {
			secretName := fmt.Sprintf("%v-%v-secret-%v", groupName, i, j)
			secretConfigs = append(secretConfigs, &testutils.SecretConfig{
				Content:   map[string]string{"foo": "bar"},
				Client:    nil, // this will be overwritten later
				Name:      secretName,
				Namespace: namespace,
				LogFunc:   framework.Logf,
			})
			secretNames = append(secretNames, secretName)
		}

		baseConfig := &testutils.RCConfig{
			Client:         nil, // this will be overwritten later
			InternalClient: nil, // this will be overwritten later
			Name:           groupName + "-" + strconv.Itoa(i),
			Namespace:      namespace,
			Timeout:        10 * time.Minute,
			Image:          image,
			Command:        command,
			Replicas:       size,
			CpuRequest:     10,       // 0.01 core
			MemRequest:     26214400, // 25MB
			SecretNames:    secretNames,
		}

		var config testutils.RunObjectConfig
		switch kind {
		case api.Kind("ReplicationController"):
			config = baseConfig
		case extensions.Kind("ReplicaSet"):
			config = &testutils.ReplicaSetConfig{RCConfig: *baseConfig}
		case extensions.Kind("Deployment"):
			config = &testutils.DeploymentConfig{RCConfig: *baseConfig}
		case batch.Kind("Job"):
			config = &testutils.JobConfig{RCConfig: *baseConfig}
		default:
			framework.Failf("Unsupported kind for config creation: %v", kind)
		}
		configs = append(configs, config)
	}
	return configs, secretConfigs
}
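A hypothetical call to this helper, using placeholder counts and the serve_hostname image that appears elsewhere in these tests; only the signature is taken from the example above:

	configs, secretConfigs := generateConfigsForGroup(
		namespaces,    // []*v1.Namespace prepared earlier by the test
		"load-group",  // groupName, used as the object name prefix
		5,             // size: replicas per object
		10,            // count: number of objects to create
		"gcr.io/google_containers/serve_hostname:v1.4",
		nil,           // command
		extensions.Kind("Deployment"),
		2,             // secretsPerPod
	)
	// Client and InternalClient are still nil at this point and must be filled
	// in before the configs are run, as the comments in the example note.
	_ = configs
	_ = secretConfigs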
Example #4
func ScalerFor(kind unversioned.GroupKind, c client.Interface) (Scaler, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return &ReplicationControllerScaler{c}, nil
	case extensions.Kind("ReplicaSet"):
		return &ReplicaSetScaler{c.Extensions()}, nil
	case extensions.Kind("Job"), batch.Kind("Job"):
		return &JobScaler{c.Batch()}, nil // Either kind of job can be scaled with Batch interface.
	case extensions.Kind("Deployment"):
		return &DeploymentScaler{c.Extensions()}, nil
	}
	return nil, fmt.Errorf("no scaler has been implemented for %q", kind)
}
Example #5
func ScalerFor(kind schema.GroupKind, c internalclientset.Interface) (Scaler, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return &ReplicationControllerScaler{c.Core()}, nil
	case extensions.Kind("ReplicaSet"):
		return &ReplicaSetScaler{c.Extensions()}, nil
	case batch.Kind("Job"):
		return &JobScaler{c.Batch()}, nil // Either kind of job can be scaled with Batch interface.
	case apps.Kind("StatefulSet"):
		return &StatefulSetScaler{c.Apps()}, nil
	case extensions.Kind("Deployment"):
		return &DeploymentScaler{c.Extensions()}, nil
	}
	return nil, fmt.Errorf("no scaler has been implemented for %q", kind)
}
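A compact usage sketch of this factory, mirroring the e2e calls in Examples #7–#9; clientset is assumed to be a configured internalclientset.Interface, and the intervals are illustrative:

	scaler, err := ScalerFor(batch.Kind("Job"), clientset)
	if err != nil {
		return err
	}
	retry := NewRetryParams(5*time.Second, 1*time.Minute)
	waitForReplicas := NewRetryParams(5*time.Second, 5*time.Minute)
	// Scale the Job to two parallel pods and wait until the new size is reached.
	if err := scaler.Scale("default", "my-job", 2, nil, retry, waitForReplicas); err != nil {
		return err
	}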
Example #6
func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
	jobs := reaper.Batch().Jobs(namespace)
	pods := reaper.Pods(namespace)
	scaler, err := ScalerFor(batch.Kind("Job"), *reaper)
	if err != nil {
		return err
	}
	job, err := jobs.Get(name)
	if err != nil {
		return err
	}
	if timeout == 0 {
		// we will never have more active pods than job.Spec.Parallelism
		parallelism := *job.Spec.Parallelism
		timeout = Timeout + time.Duration(10*parallelism)*time.Second
	}

	// TODO: handle overlapping jobs
	retry := NewRetryParams(reaper.pollInterval, reaper.timeout)
	waitForJobs := NewRetryParams(reaper.pollInterval, timeout)
	if err = scaler.Scale(namespace, name, 0, nil, retry, waitForJobs); err != nil {
		return err
	}
	// at this point only dead pods are left, that should be removed
	selector, _ := unversioned.LabelSelectorAsSelector(job.Spec.Selector)
	options := api.ListOptions{LabelSelector: selector}
	podList, err := pods.List(options)
	if err != nil {
		return err
	}
	errList := []error{}
	for _, pod := range podList.Items {
		if err := pods.Delete(pod.Name, gracePeriod); err != nil {
			// ignores the error when the pod isn't found
			if !errors.IsNotFound(err) {
				errList = append(errList, err)
			}
		}
	}
	if len(errList) > 0 {
		return utilerrors.NewAggregate(errList)
	}
	// once we have all the pods removed we can safely remove the job itself
	return jobs.Delete(name, nil)
}
Example #7
	})

	It("should scale a job up", func() {
		startParallelism := int32(1)
		endParallelism := int32(2)
		By("Creating a job")
		job := newTestV1Job("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
		job, err := createV1Job(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == startParallelism")
		err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, startParallelism)
		Expect(err).NotTo(HaveOccurred())

		By("scale job up")
		scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.Client)
		Expect(err).NotTo(HaveOccurred())
		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
		err = scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == endParallelism")
		err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should scale a job down", func() {
		startParallelism := int32(2)
		endParallelism := int32(1)
		By("Creating a job")
Example #8
	})

	It("should scale a job up", func() {
		startParallelism := int32(1)
		endParallelism := int32(2)
		By("Creating a job")
		job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == startParallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism)
		Expect(err).NotTo(HaveOccurred())

		By("scale job up")
		scaler, err := kubectl.ScalerFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
		Expect(err).NotTo(HaveOccurred())
		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
		err = scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == endParallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should scale a job down", func() {
		startParallelism := int32(2)
		endParallelism := int32(1)
		By("Creating a job")
Example #9
	})

	It("should scale a job up", func() {
		startParallelism := int32(1)
		endParallelism := int32(2)
		By("Creating a job")
		job := newTestJob("notTerminate", "scale-up", v1.RestartPolicyNever, startParallelism, completions)
		job, err := createJob(f.ClientSet, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == startParallelism")
		err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, startParallelism)
		Expect(err).NotTo(HaveOccurred())

		By("scale job up")
		scaler, err := kubectl.ScalerFor(batchinternal.Kind("Job"), f.InternalClientset)
		Expect(err).NotTo(HaveOccurred())
		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
		err = scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == endParallelism")
		err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should scale a job down", func() {
		startParallelism := int32(2)
		endParallelism := int32(1)
		By("Creating a job")
Example #10
func TestPodNodeConstraintsResources(t *testing.T) {
	ns := kapi.NamespaceDefault
	testconfigs := []struct {
		config         *api.PodNodeConstraintsConfig
		userinfo       user.Info
		reviewResponse *authorizationapi.SubjectAccessReviewResponse
	}{
		{
			config:         testConfig(),
			userinfo:       serviceaccount.UserInfo("", "", ""),
			reviewResponse: reviewResponse(false, ""),
		},
	}
	testresources := []struct {
		resource      func(bool) runtime.Object
		kind          unversioned.GroupKind
		groupresource unversioned.GroupResource
		prefix        string
	}{
		{
			resource:      replicationController,
			kind:          kapi.Kind("ReplicationController"),
			groupresource: kapi.Resource("replicationcontrollers"),
			prefix:        "ReplicationController",
		},
		{
			resource:      deployment,
			kind:          extensions.Kind("Deployment"),
			groupresource: extensions.Resource("deployments"),
			prefix:        "Deployment",
		},
		{
			resource:      replicaSet,
			kind:          extensions.Kind("ReplicaSet"),
			groupresource: extensions.Resource("replicasets"),
			prefix:        "ReplicaSet",
		},
		{
			resource:      job,
			kind:          extensions.Kind("Job"),
			groupresource: extensions.Resource("jobs"),
			prefix:        "Job",
		},
		{
			resource:      job,
			kind:          batch.Kind("Job"),
			groupresource: batch.Resource("jobs"),
			prefix:        "Job",
		},
		{
			resource:      deploymentConfig,
			kind:          deployapi.Kind("DeploymentConfig"),
			groupresource: deployapi.Resource("deploymentconfigs"),
			prefix:        "DeploymentConfig",
		},
		{
			resource:      podTemplate,
			kind:          deployapi.Kind("PodTemplate"),
			groupresource: deployapi.Resource("podtemplates"),
			prefix:        "PodTemplate",
		},
		{
			resource:      podSecurityPolicySubjectReview,
			kind:          securityapi.Kind("PodSecurityPolicySubjectReview"),
			groupresource: securityapi.Resource("podsecuritypolicysubjectreviews"),
			prefix:        "PodSecurityPolicy",
		},
		{
			resource:      podSecurityPolicySelfSubjectReview,
			kind:          securityapi.Kind("PodSecurityPolicySelfSubjectReview"),
			groupresource: securityapi.Resource("podsecuritypolicyselfsubjectreviews"),
			prefix:        "PodSecurityPolicy",
		},
		{
			resource:      podSecurityPolicyReview,
			kind:          securityapi.Kind("PodSecurityPolicyReview"),
			groupresource: securityapi.Resource("podsecuritypolicyreviews"),
			prefix:        "PodSecurityPolicy",
		},
	}
	testparams := []struct {
		nodeselector     bool
		expectedErrorMsg string
		prefix           string
	}{
		{
			nodeselector:     true,
			expectedErrorMsg: "node selection by label(s) [bogus] is prohibited by policy for your role",
			prefix:           "with nodeSelector",
		},
		{
			nodeselector:     false,
			expectedErrorMsg: "",
			prefix:           "without nodeSelector",
		},
	}
	testops := []struct {
		operation admission.Operation
	}{
		{
			operation: admission.Create,
		},
		{
			operation: admission.Update,
		},
	}
	for _, tc := range testconfigs {
		for _, tr := range testresources {
			for _, tp := range testparams {
				for _, top := range testops {
					var expectedError error
					errPrefix := fmt.Sprintf("%s; %s; %s", tr.prefix, tp.prefix, top.operation)
					prc := NewPodNodeConstraints(tc.config)
					prc.(oadmission.WantsAuthorizer).SetAuthorizer(fakeAuthorizer(t))
					err := prc.(oadmission.Validator).Validate()
					if err != nil {
						checkAdmitError(t, err, expectedError, errPrefix)
						continue
					}
					attrs := admission.NewAttributesRecord(tr.resource(tp.nodeselector), nil, tr.kind.WithVersion("version"), ns, "test", tr.groupresource.WithVersion("version"), "", top.operation, tc.userinfo)
					if tp.expectedErrorMsg != "" {
						expectedError = admission.NewForbidden(attrs, fmt.Errorf(tp.expectedErrorMsg))
					}
					err = prc.Admit(attrs)
					checkAdmitError(t, err, expectedError, errPrefix)
				}
			}
		}
	}
}
Example #11
type podNodeConstraints struct {
	*admission.Handler
	selectorLabelBlacklist sets.String
	config                 *api.PodNodeConstraintsConfig
	authorizer             authorizer.Authorizer
}

// resourcesToCheck is a map of resources and corresponding kinds of things that
// we want handled in this plugin
// TODO: Include a function that will extract the PodSpec from the resource for
// each type added here.
var resourcesToCheck = map[unversioned.GroupResource]unversioned.GroupKind{
	kapi.Resource("pods"):                   kapi.Kind("Pod"),
	kapi.Resource("podtemplates"):           kapi.Kind("PodTemplate"),
	kapi.Resource("replicationcontrollers"): kapi.Kind("ReplicationController"),
	batch.Resource("jobs"):                  batch.Kind("Job"),
	extensions.Resource("deployments"):      extensions.Kind("Deployment"),
	extensions.Resource("replicasets"):      extensions.Kind("ReplicaSet"),
	extensions.Resource("jobs"):             extensions.Kind("Job"),
	deployapi.Resource("deploymentconfigs"): deployapi.Kind("DeploymentConfig"),
}

// resourcesToIgnore is a list of resource kinds that contain a PodSpec that
// we choose not to handle in this plugin
var resourcesToIgnore = []unversioned.GroupKind{
	extensions.Kind("DaemonSet"),
}

func shouldCheckResource(resource unversioned.GroupResource, kind unversioned.GroupKind) (bool, error) {
	expectedKind, shouldCheck := resourcesToCheck[resource]
	if !shouldCheck {
Example #12
func kindSupportsGarbageCollector(kind schema.GroupKind) bool {
	return kind != extensions.Kind("Deployment") && kind != batch.Kind("Job")
}
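An illustrative call site for this predicate; the surrounding cleanup logic is assumed, not taken from the original source:

	if kindSupportsGarbageCollector(kind) {
		// delete the top-level object and let the garbage collector
		// clean up its pods
	} else {
		// Deployments and Jobs: fall back to a reaper (see ReaperFor above),
		// which scales down and deletes dependents explicitly
	}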
Example #13
					Replicas:             (totalPods + numberOfCollections - 1) / numberOfCollections,
					CpuRequest:           nodeCpuCapacity / 100,
					MemRequest:           nodeMemCapacity / 100,
					MaxContainerFailures: &MaxContainerFailures,
					Silent:               true,
					LogFunc:              framework.Logf,
					SecretNames:          secretNames,
				}
				switch itArg.kind {
				case api.Kind("ReplicationController"):
					configs[i] = baseConfig
				case extensions.Kind("ReplicaSet"):
					configs[i] = &testutils.ReplicaSetConfig{RCConfig: *baseConfig}
				case extensions.Kind("Deployment"):
					configs[i] = &testutils.DeploymentConfig{RCConfig: *baseConfig}
				case batch.Kind("Job"):
					configs[i] = &testutils.JobConfig{RCConfig: *baseConfig}
				default:
					framework.Failf("Unsupported kind: %v", itArg.kind)
				}
			}

			dConfig := DensityTestConfig{
				ClientSet:         f.ClientSet,
				InternalClientset: f.InternalClientset,
				Configs:           configs,
				PodCount:          totalPods,
				PollInterval:      DensityPollInterval,
				kind:              itArg.kind,
				SecretConfigs:     secretConfigs,
			}
Example #14
		// What kind of resource we should be creating. Default: ReplicationController
		kind           schema.GroupKind
		secretsPerPod  int
		daemonsPerNode int
	}

	densityTests := []Density{
		// TODO: Expose runLatencyTest as ginkgo flag.
		{podsPerNode: 3, runLatencyTest: false, kind: api.Kind("ReplicationController")},
		{podsPerNode: 30, runLatencyTest: true, kind: api.Kind("ReplicationController")},
		{podsPerNode: 50, runLatencyTest: false, kind: api.Kind("ReplicationController")},
		{podsPerNode: 95, runLatencyTest: true, kind: api.Kind("ReplicationController")},
		{podsPerNode: 100, runLatencyTest: false, kind: api.Kind("ReplicationController")},
		// Tests for other resource types:
		{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment")},
		{podsPerNode: 30, runLatencyTest: true, kind: batch.Kind("Job")},
		// Test scheduling when daemons are preset
		{podsPerNode: 30, runLatencyTest: true, kind: api.Kind("ReplicationController"), daemonsPerNode: 2},
		// Test with secrets
		{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment"), secretsPerPod: 2},
	}

	for _, testArg := range densityTests {
		feature := "ManualPerformance"
		switch testArg.podsPerNode {
		case 30:
			if testArg.kind == api.Kind("ReplicationController") && testArg.daemonsPerNode == 0 && testArg.secretsPerPod == 0 {
				feature = "Performance"
			}
		case 95:
			feature = "HighDensityPerformance"
Example #15
type podNodeConstraints struct {
	*admission.Handler
	selectorLabelBlacklist sets.String
	config                 *api.PodNodeConstraintsConfig
	authorizer             authorizer.Authorizer
}

// resourcesToCheck is a map of resources and corresponding kinds of things that
// we want handled in this plugin
// TODO: Include a function that will extract the PodSpec from the resource for
// each type added here.
var resourcesToCheck = map[unversioned.GroupResource]unversioned.GroupKind{
	kapi.Resource("pods"):                                       kapi.Kind("Pod"),
	kapi.Resource("podtemplates"):                               kapi.Kind("PodTemplate"),
	kapi.Resource("replicationcontrollers"):                     kapi.Kind("ReplicationController"),
	batch.Resource("jobs"):                                      batch.Kind("Job"),
	batch.Resource("jobtemplates"):                              batch.Kind("JobTemplate"),
	batch.Resource("scheduledjobs"):                             batch.Kind("ScheduledJob"),
	extensions.Resource("deployments"):                          extensions.Kind("Deployment"),
	extensions.Resource("replicasets"):                          extensions.Kind("ReplicaSet"),
	extensions.Resource("jobs"):                                 extensions.Kind("Job"),
	extensions.Resource("jobtemplates"):                         extensions.Kind("JobTemplate"),
	apps.Resource("petsets"):                                    apps.Kind("PetSet"),
	deployapi.Resource("deploymentconfigs"):                     deployapi.Kind("DeploymentConfig"),
	securityapi.Resource("podsecuritypolicysubjectreviews"):     securityapi.Kind("PodSecurityPolicySubjectReview"),
	securityapi.Resource("podsecuritypolicyselfsubjectreviews"): securityapi.Kind("PodSecurityPolicySelfSubjectReview"),
	securityapi.Resource("podsecuritypolicyreviews"):            securityapi.Kind("PodSecurityPolicyReview"),
}

// resourcesToIgnore is a list of resource kinds that contain a PodSpec that
// we choose not to handle in this plugin
Example #16
func (config *JobConfig) GetKind() schema.GroupKind {
	return batchinternal.Kind("Job")
}
Example #17
func (c *ErrorJobs) Update(job *batch.Job) (*batch.Job, error) {
	if c.invalid {
		return nil, kerrors.NewInvalid(batch.Kind(job.Kind), job.Name, nil)
	}
	return nil, errors.New("Job update failure")
}
Example #18
		image       string
		command     []string
		// What kind of resource we want to create
		kind           schema.GroupKind
		services       bool
		secretsPerPod  int
		daemonsPerNode int
	}

	loadTests := []Load{
		// The container will consume 1 cpu and 512mb of memory.
		{podsPerNode: 3, image: "jess/stress", command: []string{"stress", "-c", "1", "-m", "2"}, kind: api.Kind("ReplicationController")},
		{podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: api.Kind("ReplicationController")},
		// Tests for other resource types
		{podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: extensions.Kind("Deployment")},
		{podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: batch.Kind("Job")},
		// Test scheduling when daemons are preset
		{podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: api.Kind("ReplicationController"), daemonsPerNode: 2},
		// Test with secrets
		{podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: extensions.Kind("Deployment"), secretsPerPod: 2},
	}

	for _, testArg := range loadTests {
		feature := "ManualPerformance"
		if testArg.podsPerNode == 30 && testArg.kind == api.Kind("ReplicationController") && testArg.daemonsPerNode == 0 && testArg.secretsPerPod == 0 {
			feature = "Performance"
		}
		name := fmt.Sprintf("[Feature:%s] should be able to handle %v pods per node %v with %v secrets and %v daemons",
			feature,
			testArg.podsPerNode,
			testArg.kind,