Example No. 1
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("Mesos", func() {
	f := framework.NewDefaultFramework("pods")
	var c clientset.Interface
	var ns string

	BeforeEach(func() {
		framework.SkipUnlessProviderIs("mesos/docker")
		c = f.ClientSet
		ns = f.Namespace.Name
	})

	It("applies slave attributes as labels", func() {
		nodeClient := f.ClientSet.Core().Nodes()

		rackA := labels.SelectorFromSet(map[string]string{"k8s.mesosphere.io/attribute-rack": "1"})
		options := v1.ListOptions{LabelSelector: rackA.String()}
		nodes, err := nodeClient.List(options)
		if err != nil {
			framework.Failf("Failed to query for node: %v", err)
		}
		Expect(len(nodes.Items)).To(Equal(1))
Example No. 2
	gkeEndpoint      = "https://test-container.sandbox.googleapis.com"
	gkeUpdateTimeout = 15 * time.Minute
)

var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
	f := framework.NewDefaultFramework("autoscaling")
	var c clientset.Interface
	var nodeCount int
	var coresPerNode int
	var memCapacityMb int
	var originalSizes map[string]int

	BeforeEach(func() {
		c = f.ClientSet
		framework.SkipUnlessProviderIs("gce", "gke")

		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		nodeCount = len(nodes.Items)
		Expect(nodeCount).NotTo(BeZero())
		cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
		mem := nodes.Items[0].Status.Capacity[v1.ResourceMemory]
		coresPerNode = int((&cpu).MilliValue() / 1000)
		memCapacityMb = int((&mem).Value() / 1024 / 1024)

		originalSizes = make(map[string]int)
		sum := 0
		for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
			size, err := GroupSize(mig)
			framework.ExpectNoError(err)
			By(fmt.Sprintf("Initial size of %s: %d", mig, size))
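
The capacity arithmetic in the BeforeEach above turns resource.Quantity values into whole cores and MiB. A minimal standalone sketch of the same conversion, assuming the modern k8s.io/apimachinery import path for the resource package and made-up capacity values:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Example capacities as they might appear in a node's status.
	cpu := resource.MustParse("4")
	mem := resource.MustParse("7679Mi")

	coresPerNode := int(cpu.MilliValue() / 1000)    // millicores -> whole cores
	memCapacityMb := int(mem.Value() / 1024 / 1024) // bytes -> MiB

	fmt.Println(coresPerNode, memCapacityMb) // 4 7679
}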
Example No. 3
var _ = framework.KubeDescribe("Stateful Set recreate", func() {
	f := framework.NewDefaultFramework("pet-set-recreate")
	var c clientset.Interface
	var ns string

	labels := map[string]string{
		"foo": "bar",
		"baz": "blah",
	}
	headlessSvcName := "test"
	podName := "test-pod"
	statefulSetName := "web"
	petPodName := "web-0"

	BeforeEach(func() {
		framework.SkipUnlessProviderIs("gce", "gke", "vagrant")
		By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name)
		headlessService := createServiceSpec(headlessSvcName, "", true, labels)
		_, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService)
		framework.ExpectNoError(err)
		c = f.ClientSet
		ns = f.Namespace.Name
	})

	AfterEach(func() {
		if CurrentGinkgoTestDescription().Failed {
			dumpDebugInfo(c, ns)
		}
		By("Deleting all statefulset in ns " + ns)
		deleteAllStatefulSets(c, ns)
	})
Example No. 4
	// considered failed.
	rebootNodeReadyAgainTimeout = 5 * time.Minute

	// How long pods have to be "ready" after the reboot.
	rebootPodReadyAgainTimeout = 5 * time.Minute
)

var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
	var f *framework.Framework

	BeforeEach(func() {
		// These tests require SSH access to nodes, so the provider check should be identical to the one there
		// (the limiting factor is the implementation of util.go's framework.GetSigner(...)).

		// Cluster must support node reboot
		framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
	})

	AfterEach(func() {
		if CurrentGinkgoTestDescription().Failed {
			// Most of the reboot tests just make sure that addon/system pods are running, so dump
			// events for the kube-system namespace on failures
			namespaceName := api.NamespaceSystem
			By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName))
			events, err := f.Client.Events(namespaceName).List(api.ListOptions{})
			Expect(err).NotTo(HaveOccurred())

			for _, e := range events.Items {
				framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
			}
		}
Example No. 5
	// Validate the federation apiserver; this does not rely on underlying clusters or the federation ingress controller.
	Describe("Ingress objects", func() {
		AfterEach(func() {
			nsName := f.FederationNamespace.Name
			// Delete registered ingresses.
			ingressList, err := f.FederationClientset_1_4.Extensions().Ingresses(nsName).List(api.ListOptions{})
			Expect(err).NotTo(HaveOccurred())
			for _, ingress := range ingressList.Items {
				err := f.FederationClientset_1_4.Extensions().Ingresses(nsName).Delete(ingress.Name, &api.DeleteOptions{})
				Expect(err).NotTo(HaveOccurred())
			}
		})

		It("should be created and deleted successfully", func() {
			framework.SkipUnlessFederated(f.Client)
			framework.SkipUnlessProviderIs("gce", "gke") // TODO: Federated ingress is not yet supported on non-GCP platforms.
			nsName := f.FederationNamespace.Name
			ingress := createIngressOrFail(f.FederationClientset_1_4, nsName)
			By(fmt.Sprintf("Creation of ingress %q in namespace %q succeeded.  Deleting ingress.", ingress.Name, nsName))
			// Cleanup
			err := f.FederationClientset_1_4.Extensions().Ingresses(nsName).Delete(ingress.Name, &api.DeleteOptions{})
			framework.ExpectNoError(err, "Error deleting ingress %q in namespace %q", ingress.Name, ingress.Namespace)
			By(fmt.Sprintf("Deletion of ingress %q in namespace %q succeeded.", ingress.Name, nsName))
		})
	})

	// e2e cases for federation ingress controller
	var _ = Describe("Federated Ingresses", func() {
		var (
			clusters                               map[string]*cluster // All clusters, keyed by cluster name
			primaryClusterName, federationName, ns string
Example No. 6
				break T
			}
		}
	}

	// We should have exceeded the finalTransactionsExpected number of transactions.
	// If this fails but transactions are still being created, we may need to recalibrate
	// the finalTransactionsExpected value; otherwise, your cluster is broken or slow!
	Ω(totalTransactions).Should(BeNumerically(">", finalTransactionsExpected))
}

var _ = framework.KubeDescribe("Pet Store [Feature:Example]", func() {

	BeforeEach(func() {
		// The shell scripts in k8petstore break on Jenkins... A pure Go rewrite is in progress.
		framework.SkipUnlessProviderIs("local")
	})

	// The number of nodes dictates total number of generators/transaction expectations.
	var nodeCount int
	f := framework.NewDefaultFramework("petstore")

	It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout), func() {
		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		nodeCount = len(nodes.Items)

		loadGenerators := nodeCount
		restServers := nodeCount
		fmt.Printf("load generators / rest servers [ %v  /  %v ] ", loadGenerators, restServers)
		runK8petstore(restServers, loadGenerators, f.ClientSet, f.Namespace.Name, k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout)
	})
Example No. 7
			testVolumeClient(c, config, volume, nil, "Hello Ceph!")
		})
	})

	////////////////////////////////////////////////////////////////////////
	// OpenStack Cinder
	////////////////////////////////////////////////////////////////////////

	// This test assumes that OpenStack client tools are installed
	// (/usr/bin/nova, /usr/bin/cinder and /usr/bin/keystone)
	// and that the usual OpenStack authentication environment variables are set
	// (at least OS_USERNAME, OS_PASSWORD and OS_TENANT_NAME).
	// A standalone prerequisite check is sketched after this example.

	framework.KubeDescribe("Cinder", func() {
		It("should be mountable", func() {
			framework.SkipUnlessProviderIs("openstack")
			config := VolumeTestConfig{
				namespace: namespace.Name,
				prefix:    "cinder",
			}

			// We assume that namespace.Name is a random string
			volumeName := namespace.Name
			By("creating a test Cinder volume")
			output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput()
			outputString := string(output[:])
			framework.Logf("cinder output:\n%s", outputString)
			Expect(err).NotTo(HaveOccurred())

			defer func() {
				// Ignore any cleanup errors, there is not much we can do about
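
The comment in the Cinder example above lists prerequisites (the nova/cinder/keystone CLIs and the OS_* credentials) that the test itself never verifies. A minimal, hypothetical standalone check of those prerequisites; the helper name cinderPrereqsMet is not part of the original test:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// cinderPrereqsMet reports whether the OpenStack tooling and credentials the
// Cinder volume test assumes are actually available in this environment.
func cinderPrereqsMet() (bool, string) {
	for _, bin := range []string{"nova", "cinder", "keystone"} {
		if _, err := exec.LookPath(bin); err != nil {
			return false, bin + " CLI not found on PATH"
		}
	}
	for _, env := range []string{"OS_USERNAME", "OS_PASSWORD", "OS_TENANT_NAME"} {
		if os.Getenv(env) == "" {
			return false, env + " is not set"
		}
	}
	return true, ""
}

func main() {
	if ok, reason := cinderPrereqsMet(); !ok {
		fmt.Println("skipping Cinder checks:", reason)
		return
	}
	fmt.Println("OpenStack prerequisites satisfied")
}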
Example No. 8
var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {

	f := framework.NewDefaultFramework("daemonrestart")
	rcName := "daemonrestart" + strconv.Itoa(numPods) + "-" + string(uuid.NewUUID())
	labelSelector := labels.Set(map[string]string{"name": rcName}).AsSelector()
	existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc)
	var ns string
	var config framework.RCConfig
	var controller *cache.Controller
	var newPods cache.Store
	var stopCh chan struct{}
	var tracker *podTracker

	BeforeEach(func() {
		// These tests require SSH
		framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
		ns = f.Namespace.Name

		// All the restart tests need an rc and a watch on pods of the rc.
		// Additionally some of them might scale the rc during the test.
		config = framework.RCConfig{
			Client:      f.Client,
			Name:        rcName,
			Namespace:   ns,
			Image:       framework.GetPauseImageName(f.Client),
			Replicas:    numPods,
			CreatedPods: &[]*api.Pod{},
		}
		Expect(framework.RunRC(config)).NotTo(HaveOccurred())
		replacePods(*config.CreatedPods, existingPods)
Example No. 9
	f := framework.NewDefaultFramework("daemonrestart")
	rcName := "daemonrestart" + strconv.Itoa(numPods) + "-" + string(util.NewUUID())
	labelSelector := labels.Set(map[string]string{"name": rcName}).AsSelector()
	existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc)
	var ns string
	var config framework.RCConfig
	var controller *controllerframework.Controller
	var newPods cache.Store
	var stopCh chan struct{}
	var tracker *podTracker

	BeforeEach(func() {
		// These tests require SSH
		// TODO(11834): Enable this test in GKE once experimental API there is switched on
		framework.SkipUnlessProviderIs("gce", "aws")
		ns = f.Namespace.Name

		// All the restart tests need an rc and a watch on pods of the rc.
		// Additionally some of them might scale the rc during the test.
		config = framework.RCConfig{
			Client:      f.Client,
			Name:        rcName,
			Namespace:   ns,
			Image:       framework.GetPauseImageName(f.Client),
			Replicas:    numPods,
			CreatedPods: &[]*api.Pod{},
		}
		Expect(framework.RunRC(config)).NotTo(HaveOccurred())
		replacePods(*config.CreatedPods, existingPods)
Example No. 10
					"availability": "",
				},
				"1.5Gi",
				"2Gi",
				nil, // there is currently nothing to check on OpenStack
			},
		}

		for i, t := range tests {
			// Beware of closures: use local variables instead of those from the
			// outer scope (a short standalone illustration follows this example).
			test := t
			suffix := fmt.Sprintf("%d", i)
			It(test.name, func() {
				if len(t.cloudProviders) > 0 {
					framework.SkipUnlessProviderIs(test.cloudProviders...)
				}

				class := newStorageClass(test, suffix)
				claim := newClaim(test, ns, suffix, false)
				testDynamicProvisioning(test, c, claim, class)
			})
		}
	})

	framework.KubeDescribe("DynamicProvisioner Alpha", func() {
		It("should provision alpha volumes [Slow]", func() {
			framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke")

			By("creating a claim with an alpha dynamic provisioning annotation")
			test := storageClassTest{
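
The "beware of closures" comment in the provisioning loop above points at the classic Go pitfall: before Go 1.22 the range variable is reused across iterations, so closures created in the loop all observe its final value unless it is copied into a loop-local variable. A small standalone illustration of the per-iteration copy that the loop uses (test := t):

package main

import "fmt"

func main() {
	var printers []func()
	for _, name := range []string{"a", "b", "c"} {
		// Prior to Go 1.22 the range variable is reused on every iteration;
		// copying it locally lets each closure keep the value it saw.
		name := name
		printers = append(printers, func() { fmt.Println(name) })
	}
	for _, p := range printers {
		p() // prints "a", "b", "c"
	}
}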