Example #1
func testAppArmorNode() {
	BeforeEach(func() {
		By("Loading AppArmor profiles for testing")
		framework.ExpectNoError(loadTestProfiles(), "Could not load AppArmor test profiles")
	})
	Context("when running with AppArmor", func() {
		f := framework.NewDefaultFramework("apparmor-test")

		It("should reject an unloaded profile", func() {
		status := runAppArmorTest(f, "localhost/"+"non-existent-profile")
			Expect(status.Phase).To(Equal(api.PodFailed), "PodStatus: %+v", status)
			Expect(status.Reason).To(Equal("AppArmor"), "PodStatus: %+v", status)
		})
		It("should enforce a profile blocking writes", func() {
			status := runAppArmorTest(f, "localhost/"+apparmorProfilePrefix+"deny-write")
			if len(status.ContainerStatuses) == 0 {
				framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
				return
			}
			state := status.ContainerStatuses[0].State.Terminated
			Expect(state.ExitCode).To(Not(BeZero()), "ContainerStateTerminated: %+v", state)
		})
		It("should enforce a permissive profile", func() {
			status := runAppArmorTest(f, "localhost/"+apparmorProfilePrefix+"audit-write")
			if len(status.ContainerStatuses) == 0 {
				framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
				return
			}
			state := status.ContainerStatuses[0].State.Terminated
			Expect(state.ExitCode).To(BeZero(), "ContainerStateTerminated: %+v", state)
		})
	})
}
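
runAppArmorTest is not shown here; in Kubernetes of this era, a pod opted into a profile through the beta AppArmor annotation. A hedged sketch of that wiring (the container name "test" is illustrative, not part of the original):

	// Illustrative only: request a loaded localhost profile for container "test".
	pod.Annotations = map[string]string{
		"container.apparmor.security.beta.kubernetes.io/test": "localhost/" + apparmorProfilePrefix + "deny-write",
	}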
Example #2
func NewDefaultFederatedFramework(baseName string) *Framework {
	f := &Framework{framework.NewDefaultFramework(baseName), nil, &v1.Namespace{}}

	BeforeEach(f.FederationBeforeEach)
	AfterEach(f.FederationAfterEach)

	return f
}
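
A minimal sketch of how a suite might consume this constructor; the suite and test names here are illustrative assumptions, not part of the original:

var _ = framework.KubeDescribe("Federation framework", func() {
	f := NewDefaultFederatedFramework("federation-smoke")

	It("wires federation setup and teardown hooks", func() {
		// FederationBeforeEach has run by this point, so the embedded
		// framework and the federation namespace fields are populated.
		_ = f
	})
})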
Example #3
func testNonAppArmorNode() {
	Context("when running without AppArmor", func() {
		f := framework.NewDefaultFramework("apparmor-test")

		It("should reject a pod with an AppArmor profile", func() {
			status := runAppArmorTest(f, "runtime/default")
			Expect(status.Phase).To(Equal(api.PodFailed), "PodStatus: %+v", status)
			Expect(status.Reason).To(Equal("AppArmor"), "PodStatus: %+v", status)
		})
	})
}
Example #4
// NewCLI initializes the upstream E2E framework and sets the namespace to match
// the project name. Note that this function does not initialize the project
// role bindings for the namespace.
func NewCLI(project, adminConfigPath string) *CLI {
	// Avoid every caller needing to provide a unique project name
	// SetupProject already treats this as a baseName
	uniqueProject := kapi.SimpleNameGenerator.GenerateName(fmt.Sprintf("%s-", project))

	client := &CLI{}
	client.kubeFramework = e2e.NewDefaultFramework(uniqueProject)
	client.outputDir = os.TempDir()
	client.username = "******"
	client.execPath = "oc"
	if len(adminConfigPath) == 0 {
		FatalErr(fmt.Errorf("You must set the KUBECONFIG variable to admin kubeconfig."))
	}
	client.adminConfigPath = adminConfigPath

	// Register custom ns setup func
	setCreateTestingNSFunc(uniqueProject, client.SetupProject)

	return client
}
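
A hedged usage sketch (same package assumed; the base project name is illustrative, and KUBECONFIG is the variable the error message above refers to):

	// Illustrative only: each caller passes a base name and NewCLI appends
	// a unique suffix via SimpleNameGenerator before the framework is built.
	oc := NewCLI("cli-smoke", os.Getenv("KUBECONFIG"))
	_ = oc // SetupProject is registered and runs when the test namespace is created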
Example #5
		},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{
				{Port: 80, Name: "http", Protocol: "TCP"},
			},
			Selector: selector,
		},
	}
	if isHeadless {
		headlessService.Spec.ClusterIP = "None"
	}
	return headlessService
}

var _ = framework.KubeDescribe("DNS", func() {
	f := framework.NewDefaultFramework("dns")

	It("should provide DNS for the cluster [Conformance]", func() {
		verifyDNSPodIsRunning(f)

		// All the names we need to be able to resolve.
		// TODO: Spin up a separate test service and test that dns works for that service.
		namesToResolve := []string{
			"kubernetes.default",
			"kubernetes.default.svc",
			"kubernetes.default.svc.cluster.local",
			"google.com",
		}
		// Added due to #8512. This is critical for GCE and GKE deployments.
		if framework.ProviderIs("gce", "gke") {
			namesToResolve = append(namesToResolve, "metadata")
Example #6
	"k8s.io/client-go/pkg/api/unversioned"
	"k8s.io/client-go/pkg/api/v1"
	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
	policy "k8s.io/client-go/pkg/apis/policy/v1beta1"
	"k8s.io/client-go/pkg/util/intstr"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
)

// schedulingTimeout is deliberately long because sometimes we must wait a
// while for something ordinary to happen: a pod getting scheduled and moving
// into Ready.
const schedulingTimeout = 10 * time.Minute

var _ = framework.KubeDescribe("DisruptionController", func() {
	f := framework.NewDefaultFramework("disruption")
	var ns string
	var cs *kubernetes.Clientset

	BeforeEach(func() {
		// skip on GKE since alpha features are disabled
		framework.SkipIfProviderIs("gke")

		cs = f.StagingClient
		ns = f.Namespace.Name
	})

	It("should create a PodDisruptionBudget", func() {
		createPodDisruptionBudgetOrDie(cs, ns, intstr.FromString("1%"))
	})
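
A hedged sketch of how a timeout like schedulingTimeout is typically consumed via the imported wait package; podReady here is a hypothetical condition, not part of the original:

	// Illustrative only: poll every two seconds until the hypothetical
	// podReady condition reports true, giving up after schedulingTimeout.
	err := wait.Poll(2*time.Second, schedulingTimeout, func() (bool, error) {
		return podReady(), nil
	})
	framework.ExpectNoError(err)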
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

const kubeProxyE2eImage = "gcr.io/google_containers/e2e-net-amd64:1.0"

var _ = framework.KubeDescribe("Network", func() {
	const (
		testDaemonHttpPort    = 11301
		testDaemonTcpPort     = 11302
		timeoutSeconds        = 10
		postFinTimeoutSeconds = 5
	)

	fr := framework.NewDefaultFramework("network")

	It("should set TCP CLOSE_WAIT timeout", func() {
		nodes := framework.GetReadySchedulableNodesOrDie(fr.ClientSet)
		ips := collectAddresses(nodes, api.NodeInternalIP)

		if len(nodes.Items) < 2 {
			framework.Skipf(
				"Test requires >= 2 Ready nodes, but there are only %v nodes",
				len(nodes.Items))
		}

		type NodeInfo struct {
			node   *api.Node
			name   string
			nodeIp string
Example #8
	c      clientset.Interface
	ns     string
	name   string
	labels []string

	cm      *v1.ConfigMap
	isValid bool

	dnsPod      *v1.Pod
	utilPod     *v1.Pod
	utilService *v1.Service
}

var _ = framework.KubeDescribe("DNS config map", func() {
	test := &dnsConfigMapTest{
		f:    framework.NewDefaultFramework("dns-config-map"),
		ns:   "kube-system",
		name: "kube-dns",
	}

	BeforeEach(func() {
		test.c = test.f.ClientSet
	})

	It("should be able to change configuration", func() {
		test.run()
	})
})

func (t *dnsConfigMapTest) init() {
	By("Finding a DNS pod")
Example #9
import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("Mesos", func() {
	f := framework.NewDefaultFramework("pods")
	var c clientset.Interface
	var ns string

	BeforeEach(func() {
		framework.SkipUnlessProviderIs("mesos/docker")
		c = f.ClientSet
		ns = f.Namespace.Name
	})

	It("applies slave attributes as labels", func() {
		nodeClient := f.ClientSet.Core().Nodes()

		rackA := labels.SelectorFromSet(map[string]string{"k8s.mesosphere.io/attribute-rack": "1"})
		options := v1.ListOptions{LabelSelector: rackA.String()}
		nodes, err := nodeClient.List(options)
Example #10
	"k8s.io/kubernetes/pkg/api"
	apierrors "k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/util/uuid"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/pkg/version"
	"k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var serviceAccountTokenNamespaceVersion = version.MustParse("v1.2.0")

var _ = framework.KubeDescribe("ServiceAccounts", func() {
	f := framework.NewDefaultFramework("svcaccounts")

	It("should ensure a single API token exists", func() {
		// wait for the service account to reference a single secret
		var secrets []api.ObjectReference
		framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) {
			By("waiting for a single token reference")
			sa, err := f.Client.ServiceAccounts(f.Namespace.Name).Get("default")
			if apierrors.IsNotFound(err) {
				framework.Logf("default service account was not found")
				return false, nil
			}
			if err != nil {
				framework.Logf("error getting default service account: %v", err)
				return false, err
			}
Example #11
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

const (
	// How long to wait for a job to finish.
	v1JobTimeout = 15 * time.Minute

	// Job selector name
	v1JobSelectorKey = "job-name"
)

var _ = framework.KubeDescribe("V1Job", func() {
	f := framework.NewDefaultFramework("v1job")
	parallelism := int32(2)
	completions := int32(4)
	lotsOfFailures := int32(5) // more than completions

	// Simplest case: all pods succeed promptly
	It("should run a job to completion when tasks succeed", func() {
		By("Creating a job")
		job := newTestV1Job("succeed", "all-succeed", api.RestartPolicyNever, parallelism, completions)
		job, err := createV1Job(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job reaches completions")
		err = waitForV1JobFinish(f.Client, f.Namespace.Name, job.Name, completions)
		Expect(err).NotTo(HaveOccurred())
	})
Example #12
			}
		}
		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
		// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.  Most tests
		// make use of some proxy feature to verify functionality. So, if a reboot test runs right before a test
		// that tries to get logs, for example, we may get unlucky and try to use a closed tunnel to a node that
		// was recently rebooted. There's no good way to framework.Poll for proxies being closed, so we sleep.
		//
		// TODO(cjcullen) reduce this sleep (#19314)
		if framework.ProviderIs("gke") {
			By("waiting 5 minutes for all dead tunnels to be dropped")
			time.Sleep(5 * time.Minute)
		}
	})

	f = framework.NewDefaultFramework("reboot")

	It("each node by ordering clean reboot and ensure they function upon restart", func() {
		// clean shutdown and restart
		// We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is rebooted.
		testReboot(f.Client, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &")
	})

	It("each node by ordering unclean reboot and ensure they function upon restart", func() {
		// unclean shutdown and restart
		// We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is shutdown.
		testReboot(f.Client, "nohup sh -c 'sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &")
	})

	It("each node by triggering kernel panic and ensure they function upon restart", func() {
		// kernel panic
Example #13
package e2e

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("LimitRange", func() {
	f := framework.NewDefaultFramework("limitrange")

	It("should create a LimitRange with defaults and ensure pod has those defaults applied.", func() {
		By("Creating a LimitRange")

		min := getResourceList("50m", "100Mi")
		max := getResourceList("500m", "500Mi")
		defaultLimit := getResourceList("500m", "500Mi")
		defaultRequest := getResourceList("100m", "200Mi")
		maxLimitRequestRatio := api.ResourceList{}
		limitRange := newLimitRange("limit-range", api.LimitTypeContainer,
			min, max,
			defaultLimit, defaultRequest,
			maxLimitRequestRatio)
		limitRange, err := f.Client.LimitRanges(f.Namespace.Name).Create(limitRange)
		Expect(err).NotTo(HaveOccurred())
Example #14
	petPodTimeout           = 5 * time.Minute
	zookeeperManifestPath   = "test/e2e/testing-manifests/petset/zookeeper"
	mysqlGaleraManifestPath = "test/e2e/testing-manifests/petset/mysql-galera"
	redisManifestPath       = "test/e2e/testing-manifests/petset/redis"
	cockroachDBManifestPath = "test/e2e/testing-manifests/petset/cockroachdb"
	// We don't restart the MySQL cluster regardless of restartCluster, since MySQL doesn't handle restarts well
	restartCluster = true

	// Timeout for reads from databases running on stateful pods.
	readTimeout = 60 * time.Second
)

// GCE Quota requirements: 3 pds, one per stateful pod manifest declared above.
// GCE Api requirements: nodes and master need storage r/w permissions.
var _ = framework.KubeDescribe("StatefulSet", func() {
	f := framework.NewDefaultFramework("statefulset")
	var ns string
	var c clientset.Interface

	BeforeEach(func() {
		c = f.ClientSet
		ns = f.Namespace.Name
	})

	framework.KubeDescribe("Basic StatefulSet functionality", func() {
		psName := "ss"
		labels := map[string]string{
			"foo": "bar",
			"baz": "blah",
		}
		headlessSvcName := "test"
Example #15
	}

	// We should have exceeded the expected number of transactions (finalTransactionsExpected).
	// If this fails while transactions are still being created, we may need to recalibrate
	// the finalTransactionsExpected value; otherwise the cluster is broken or slow.
	Ω(totalTransactions).Should(BeNumerically(">", finalTransactionsExpected))
}

var _ = framework.KubeDescribe("Pet Store [Feature:Example]", func() {

	BeforeEach(func() {
		// The shell scripts in k8petstore break on Jenkins... A pure-Go rewrite is in progress.
		framework.SkipUnlessProviderIs("local")
	})

	// The number of nodes dictates total number of generators/transaction expectations.
	var nodeCount int
	f := framework.NewDefaultFramework("petstore")

	It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout), func() {
		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		nodeCount = len(nodes.Items)

		loadGenerators := nodeCount
		restServers := nodeCount
		fmt.Printf("load generators / rest servers [ %v  /  %v ] ", loadGenerators, restServers)
		runK8petstore(restServers, loadGenerators, f.ClientSet, f.Namespace.Name, k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout)
	})

})
Example #16
import (
	"time"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
	testutils "k8s.io/kubernetes/test/utils"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("Etcd failure [Disruptive]", func() {

	f := framework.NewDefaultFramework("etcd-failure")

	BeforeEach(func() {
		// This test requires:
		// - SSH
		// - master access
		// ... so the provider check should be identical to the intersection of
		// providers that provide those capabilities.
		framework.SkipUnlessProviderIs("gce")

		Expect(framework.RunRC(testutils.RCConfig{
			Client:    f.ClientSet,
			Name:      "baz",
			Namespace: f.Namespace.Name,
			Image:     framework.GetPauseImageName(f.ClientSet),
			Replicas:  1,
	"fmt"
	"os/exec"
	"strconv"
	"strings"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/util/json"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("Cluster level logging using GCL", func() {
	f := framework.NewDefaultFramework("gcl-logging")

	BeforeEach(func() {
		// TODO (crassirostris): Expand to GKE once the test is stable
		framework.SkipUnlessProviderIs("gce")
	})

	It("should check that logs from containers are ingested in GCL", func() {
		By("Running synthetic logger")
		createSynthLogger(f, expectedLinesCount)
		defer f.PodClient().Delete(synthLoggerPodName, &api.DeleteOptions{})
		err := framework.WaitForPodSuccessInNamespace(f.ClientSet, synthLoggerPodName, f.Namespace.Name)
		framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to succeed", synthLoggerPodName))

		By("Waiting for logs to ingest")
		totalMissing := expectedLinesCount
Example #18
			},
			Containers: []api.Container{
				{
					Name:  "test-container",
					Image: "gcr.io/google_containers/busybox:1.24",
				},
			},
			RestartPolicy: api.RestartPolicyNever,
		},
	}

	return pod
}

var _ = framework.KubeDescribe("Security Context [Feature:SecurityContext]", func() {
	f := framework.NewDefaultFramework("security-context")

	It("should support pod.Spec.SecurityContext.SupplementalGroups", func() {
		pod := scTestPod(false, false)
		pod.Spec.Containers[0].Command = []string{"id", "-G"}
		pod.Spec.SecurityContext.SupplementalGroups = []int64{1234, 5678}
		groups := []string{"1234", "5678"}
		f.TestContainerOutput("pod.Spec.SecurityContext.SupplementalGroups", pod, 0, groups)
	})

	It("should support pod.Spec.SecurityContext.RunAsUser", func() {
		pod := scTestPod(false, false)
		var uid int64 = 1001
		pod.Spec.SecurityContext.RunAsUser = &uid
		pod.Spec.Containers[0].Command = []string{"sh", "-c", "id -u"}
Example #19

package e2e

import (
	"fmt"
	"net/http"

	. "github.com/onsi/ginkgo"
	"k8s.io/kubernetes/pkg/util/sets"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = framework.KubeDescribe("Networking", func() {
	var svcname = "nettest"
	f := framework.NewDefaultFramework(svcname)

	BeforeEach(func() {
		// Assert basic external connectivity.
		// Since this is not really a test of kubernetes in any way, we
		// leave it as a pre-test assertion, rather than a Ginkgo test.
		By("Executing a successful http request from the external internet")
		resp, err := http.Get("http://google.com")
		if err != nil {
			framework.Failf("Unable to connect/talk to the internet: %v", err)
		}
		if resp.StatusCode != http.StatusOK {
			framework.Failf("Unexpected error code, expected 200, got, %v (%v)", resp.StatusCode, resp)
		}
	})
Example #20

package e2e_node

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
)

var _ = framework.KubeDescribe("SimpleMount", func() {
	f := framework.NewDefaultFramework("simple-mount-test")

	// This is a very simple test that exercises the Kubelet's mounter code path.
	// If the mount fails, the pod will not be able to run, and CreateSync will time out.
	It("should be able to mount an emptydir on a container", func() {
		pod := &v1.Pod{
			TypeMeta: metav1.TypeMeta{
				Kind:       "Pod",
				APIVersion: "v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: "simple-mount-pod",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
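
The snippet cuts off inside the container literal. Separately, a hedged sketch of the emptyDir wiring such a test exercises; the volume and mount names are illustrative assumptions:

	// Illustrative only: an emptyDir volume and the matching container mount.
	volumes := []v1.Volume{{
		Name:         "simple-mount-volume",
		VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
	}}
	mounts := []v1.VolumeMount{{Name: "simple-mount-volume", MountPath: "/opt"}}
	_, _ = volumes, mounts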
Example #21
type pausePodConfig struct {
	Name                              string
	Affinity                          string
	Annotations, Labels, NodeSelector map[string]string
	Resources                         *api.ResourceRequirements
}

var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
	var c *client.Client
	var cs clientset.Interface
	var nodeList *api.NodeList
	var systemPodsNo int
	var totalPodCapacity int64
	var RCName string
	var ns string
	f := framework.NewDefaultFramework("sched-pred")
	ignoreLabels := framework.ImagePullerLabels

	AfterEach(func() {
		rc, err := c.ReplicationControllers(ns).Get(RCName)
		if err == nil && rc.Spec.Replicas != 0 {
			By("Cleaning up the replication controller")
			err := framework.DeleteRCAndPods(c, f.ClientSet, ns, RCName)
			framework.ExpectNoError(err)
		}
	})

	BeforeEach(func() {
		c = f.Client
		cs = f.ClientSet
		ns = f.Namespace.Name
Example #22
package e2e

import (
	"strings"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/pkg/metrics"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("MetricsGrabber", func() {
	f := framework.NewDefaultFramework("metrics-grabber")
	var c clientset.Interface
	var grabber *metrics.MetricsGrabber
	BeforeEach(func() {
		var err error
		c = f.ClientSet
		grabber, err = metrics.NewMetricsGrabber(c, true, true, true, true)
		framework.ExpectNoError(err)
	})

	It("should grab all metrics from API server.", func() {
		By("Connecting to /metrics endpoint")
		response, err := grabber.GrabFromApiServer()
		framework.ExpectNoError(err)
		Expect(response).NotTo(BeEmpty())
Example #23
				}
			}
		}
		if len(nodeErrs) > 0 {
			errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", ")))
		}
	}
	if len(errList) > 0 {
		framework.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n"))
	}
}

// Slow by design (1 hour)
var _ = framework.KubeDescribe("Kubelet [Serial] [Slow]", func() {
	var nodeNames sets.String
	f := framework.NewDefaultFramework("kubelet-perf")
	var om *framework.RuntimeOperationMonitor
	var rm *framework.ResourceMonitor

	BeforeEach(func() {
		// Wait until the image prepull pod has completed so that it doesn't
		// affect the runtime CPU usage. Fail the test if prepulling cannot
		// finish in time.
		if err := framework.WaitForPodsSuccess(f.ClientSet, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingLongTimeout); err != nil {
			framework.Failf("Image puller didn't complete in %v, not running resource usage test since the metrics might be adulterated", imagePrePullingLongTimeout)
		}
		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		nodeNames = sets.NewString()
		for _, node := range nodes.Items {
			nodeNames.Insert(node.Name)
		}
Example #24
	"k8s.io/kubernetes/test/e2e/framework"

	"github.com/davecgh/go-spew/spew"
	"github.com/golang/glog"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor]", func() {
	if isAppArmorEnabled() {
		BeforeEach(func() {
			By("Loading AppArmor profiles for testing")
			framework.ExpectNoError(loadTestProfiles(), "Could not load AppArmor test profiles")
		})
		Context("when running with AppArmor", func() {
			f := framework.NewDefaultFramework("apparmor-test")

			It("should reject an unloaded profile", func() {
				status := runAppArmorTest(f, false, apparmor.ProfileNamePrefix+"non-existent-profile")
				expectSoftRejection(status)
			})
			It("should enforce a profile blocking writes", func() {
				status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"deny-write")
				if len(status.ContainerStatuses) == 0 {
					framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
					return
				}
				state := status.ContainerStatuses[0].State.Terminated
				Expect(state.ExitCode).To(Not(BeZero()), "ContainerStateTerminated: %+v", state)
			})
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

const (
	defaultTimeout   = 3 * time.Minute
	resizeTimeout    = 5 * time.Minute
	scaleUpTimeout   = 5 * time.Minute
	scaleDownTimeout = 15 * time.Minute

	gkeEndpoint      = "https://test-container.sandbox.googleapis.com"
	gkeUpdateTimeout = 15 * time.Minute
)

var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
	f := framework.NewDefaultFramework("autoscaling")
	var c clientset.Interface
	var nodeCount int
	var coresPerNode int
	var memCapacityMb int
	var originalSizes map[string]int

	BeforeEach(func() {
		c = f.ClientSet
		framework.SkipUnlessProviderIs("gce", "gke")

		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		nodeCount = len(nodes.Items)
		Expect(nodeCount).NotTo(BeZero())
		cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
		mem := nodes.Items[0].Status.Capacity[v1.ResourceMemory]
Example #26
	privilegedUdpPort          = 8081
	notPrivilegedHttpPort      = 9090
	notPrivilegedUdpPort       = 9091
	notPrivilegedContainerName = "not-privileged-container"
	privilegedContainerImage   = "gcr.io/google_containers/netexec:1.4"
	privilegedCommand          = "ip link add dummy1 type dummy"
)

type PrivilegedPodTestConfig struct {
	privilegedPod *api.Pod
	f             *framework.Framework
	hostExecPod   *api.Pod
}

var _ = framework.KubeDescribe("PrivilegedPod", func() {
	f := framework.NewDefaultFramework("e2e-privilegedpod")
	config := &PrivilegedPodTestConfig{
		f: f,
	}
	It("should test privileged pod", func() {
		config.hostExecPod = framework.LaunchHostExecPod(config.f.Client, config.f.Namespace.Name, "hostexec")

		By("Creating a privileged pod")
		config.createPrivilegedPod()

		By("Executing privileged command on privileged container")
		config.runPrivilegedCommandOnPrivilegedContainer()

		By("Executing privileged command on non-privileged container")
		config.runPrivilegedCommandOnNonPrivilegedContainer()
	})
Example #27

package common

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/util/uuid"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
)

var _ = framework.KubeDescribe("Docker Containers", func() {
	f := framework.NewDefaultFramework("containers")

	It("should use the image defaults if command and args are blank [Conformance]", func() {
		f.TestContainerOutput("use defaults", entrypointTestPod(), 0, []string{
			"[/ep default arguments]",
		})
	})

	It("should be able to override the image's default arguments (docker cmd) [Conformance]", func() {
		pod := entrypointTestPod()
		pod.Spec.Containers[0].Args = []string{"override", "arguments"}

		f.TestContainerOutput("override arguments", pod, 0, []string{
			"[/ep override arguments]",
		})
	})
Example #28
	"math"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/unversioned"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/intstr"
	"k8s.io/kubernetes/pkg/util/sets"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = framework.KubeDescribe("Ubernetes Lite", func() {
	f := framework.NewDefaultFramework("ubernetes-lite")
	var zoneCount int
	var err error
	image := "gcr.io/google_containers/serve_hostname:v1.4"
	BeforeEach(func() {
		if zoneCount <= 0 {
			zoneCount, err = getZoneCount(f.Client)
			Expect(err).NotTo(HaveOccurred())
		}
		By(fmt.Sprintf("Checking for multi-zone cluster.  Zone count = %d", zoneCount))
		framework.SkipUnlessAtLeast(zoneCount, 2, "Zone count is %d, only run for multi-zone clusters, skipping test")
		framework.SkipUnlessProviderIs("gce", "gke", "aws")
		// TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread
	})
	It("should spread the pods of a service across zones", func() {
		SpreadServiceOrFail(f, (2*zoneCount)+1, image)
Example #29
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apimachinery/registered"
	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
)

const (
	testImageRootUid    = "gcr.io/google_containers/mounttest:0.5"
	testImageNonRootUid = "gcr.io/google_containers/mounttest-user:0.3"
)

var _ = framework.KubeDescribe("EmptyDir volumes", func() {

	f := framework.NewDefaultFramework("emptydir")

	Context("when FSGroup is specified [Feature:FSGroup]", func() {
		It("new files should be created with FSGroup ownership when container is root", func() {
			doTestSetgidFSGroup(f, testImageRootUid, api.StorageMediumMemory)
		})

		It("new files should be created with FSGroup ownership when container is non-root", func() {
			doTestSetgidFSGroup(f, testImageNonRootUid, api.StorageMediumMemory)
		})

		It("files with FSGroup ownership should support (root,0644,tmpfs)", func() {
			doTest0644FSGroup(f, testImageRootUid, api.StorageMediumMemory)
		})

		It("volume on default medium should have the correct mode using FSGroup", func() {
Example #30
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
	"k8s.io/kubernetes/test/e2e/framework"

	systemdutil "github.com/coreos/go-systemd/util"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/onsi/gomega/gstruct"
	"github.com/onsi/gomega/types"
)

var _ = framework.KubeDescribe("Summary API", func() {
	f := framework.NewDefaultFramework("summary-test")
	Context("when querying /stats/summary", func() {
		AfterEach(func() {
			if CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure {
				framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
			}
		})
		It("should report resource usage through the stats api", func() {
			const pod0 = "stats-busybox-0"
			const pod1 = "stats-busybox-1"

			By("Creating test pods")
			createSummaryTestPods(f, pod0, pod1)
			// Wait for cAdvisor to collect 2 stats points
			time.Sleep(15 * time.Second)