Example 1
func TestOVirtCloudConfiguration(t *testing.T) {
	config1 := (io.Reader)(nil)

	_, err1 := cloudprovider.GetCloudProvider("ovirt", config1)
	if err1 == nil {
		t.Fatalf("An error is expected when the configuration is missing")
	}

	config2 := strings.NewReader("")

	_, err2 := cloudprovider.GetCloudProvider("ovirt", config2)
	if err2 == nil {
		t.Fatalf("An error is expected when the configuration is empty")
	}

	config3 := strings.NewReader(`
[connection]
	`)

	_, err3 := cloudprovider.GetCloudProvider("ovirt", config3)
	if err3 == nil {
		t.Fatalf("An error is expected when the uri is missing")
	}

	config4 := strings.NewReader(`
[connection]
uri = https://localhost:8443/ovirt-engine/api
`)

	_, err4 := cloudprovider.GetCloudProvider("ovirt", config4)
	if err4 != nil {
		t.Fatalf("Unexpected error creating the provider: %s", err4)
	}
}
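All of these examples resolve a provider by name, which works only because each provider package registers a factory with the cloudprovider registry at init time. The sketch below shows that registration side, assuming the k8s.io/kubernetes/pkg/cloudprovider import path and the RegisterCloudProvider/Factory plugin API of this era; the mycloud package, type, and provider name are hypothetical:

package mycloud

import (
	"io"

	"k8s.io/kubernetes/pkg/cloudprovider"
)

// myCloud is a hypothetical provider. Embedding cloudprovider.Interface
// satisfies the interface for the purposes of this sketch; a real provider
// implements Instances(), Zones(), and the other methods itself.
type myCloud struct {
	cloudprovider.Interface
}

func init() {
	// Register a factory under the name that callers later pass to
	// GetCloudProvider. The io.Reader is the same (often nil) config
	// reader seen throughout these examples.
	cloudprovider.RegisterCloudProvider("mycloud",
		func(config io.Reader) (cloudprovider.Interface, error) {
			return &myCloud{}, nil
		})
}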
Example 2
// NewClusterManager creates a cluster manager for shared resources.
// - name: the name used to tag cluster-wide shared resources. This is the
//   string passed to glbc via --gce-cluster-name.
// - defaultBackendNodePort: the node port of glbc's default backend. This is
//   the Kubernetes Service that serves the 404 page if no URLs match.
// - defaultHealthCheckPath: the default path used for L7 health checks, e.g. "/healthz".
func NewClusterManager(
	name string,
	defaultBackendNodePort int64,
	defaultHealthCheckPath string) (*ClusterManager, error) {

	cloudInterface, err := cloudprovider.GetCloudProvider("gce", nil)
	if err != nil {
		return nil, err
	}
	cloud := cloudInterface.(*gce.GCECloud)
	cluster := ClusterManager{ClusterNamer: utils.Namer{name}}
	zone, err := cloud.GetZone()
	if err != nil {
		return nil, err
	}
	cluster.instancePool = instances.NewNodePool(cloud, zone.FailureDomain)
	healthChecker := healthchecks.NewHealthChecker(cloud, defaultHealthCheckPath, cluster.ClusterNamer)
	cluster.backendPool = backends.NewBackendPool(
		cloud, healthChecker, cluster.instancePool, cluster.ClusterNamer)
	defaultBackendHealthChecker := healthchecks.NewHealthChecker(cloud, "/healthz", cluster.ClusterNamer)
	defaultBackendPool := backends.NewBackendPool(
		cloud, defaultBackendHealthChecker, cluster.instancePool, cluster.ClusterNamer)
	cluster.defaultBackendNodePort = defaultBackendNodePort
	cluster.l7Pool = loadbalancers.NewLoadBalancerPool(
		cloud, defaultBackendPool, defaultBackendNodePort, cluster.ClusterNamer)
	return &cluster, nil
}
Example 3
func getGCEClient(config io.Reader) *gce.GCECloud {
	// Creating the cloud interface involves resolving the metadata server to
	// get an OAuth token. If this fails, the token provider assumes it's not
	// on GCE and no error is returned, so we need to keep retrying until it
	// works, because we know we're on GCE.
	for {
		cloudInterface, err := cloudprovider.GetCloudProvider("gce", config)
		if err == nil {
			cloud := cloudInterface.(*gce.GCECloud)

			// If this controller is scheduled on a node without compute/rw
			// it won't be allowed to list backends. We can assume that the
			// user has no need for Ingress in this case. If they grant
			// permissions to the node they will have to restart the controller
			// manually to re-create the client.
			if _, err = cloud.ListBackendServices(); err == nil || utils.IsHTTPErrorCode(err, http.StatusForbidden) {
				return cloud
			}
			glog.Warningf("Failed to list backend services, retrying: %v", err)
		} else {
			glog.Warningf("Failed to retrieve cloud interface, retrying: %v", err)
		}
		time.Sleep(cloudClientRetryInterval)
	}
}
Example 4
// getCloudProvider returns the AWS cloud provider.
func getCloudProvider() (*aws.AWSCloud, error) {
	awsCloudProvider, err := cloudprovider.GetCloudProvider("aws", nil)
	if err != nil || awsCloudProvider == nil {
		return nil, err
	}

	// The type assertion must be safe; otherwise there is a bug in GetCloudProvider()
	return awsCloudProvider.(*aws.AWSCloud), nil
}
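Note the contract here: in this era of the plugin registry, GetCloudProvider returns nil, nil for a name that was never registered, which is why the helper checks awsCloudProvider == nil and can itself return a nil *aws.AWSCloud with a nil error. A hypothetical caller sketch (mustGetAWSCloud is not in the original source) that guards against that case:

// mustGetAWSCloud is a hypothetical wrapper that turns the nil, nil
// case into an explicit error so callers can rely on a non-nil cloud.
func mustGetAWSCloud() (*aws.AWSCloud, error) {
	cloud, err := getCloudProvider()
	if err != nil {
		return nil, err
	}
	if cloud == nil {
		// No error but no provider either: "aws" was never registered.
		return nil, fmt.Errorf("AWS cloud provider not configured")
	}
	return cloud, nil
}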
Example 5
// getCloudProvider returns the GCE cloud provider.
func getCloudProvider() (*gcecloud.GCECloud, error) {
	gceCloudProvider, err := cloudprovider.GetCloudProvider("gce", nil)
	if err != nil || gceCloudProvider == nil {
		return nil, err
	}

	// The type assertion must be safe; otherwise there is a bug in GetCloudProvider()
	return gceCloudProvider.(*gcecloud.GCECloud), nil
}
Example 6
func TestE2E(t *testing.T) {
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()
	if *reportDir != "" {
		if err := os.MkdirAll(*reportDir, 0755); err != nil {
			glog.Errorf("Failed creating report directory: %v", err)
		}
		defer CoreDump(*reportDir)
	}

	if testContext.Provider == "" {
		glog.Info("The --provider flag is not set.  Treating as a conformance test.  Some tests may not be run.")
	}

	if testContext.Provider == "aws" {
		awsConfig := "[Global]\n"
		if cloudConfig.Zone == "" {
			glog.Fatal("gce-zone must be specified for AWS")
		}
		awsConfig += fmt.Sprintf("Zone=%s\n", cloudConfig.Zone)

		if cloudConfig.ClusterTag == "" {
			glog.Fatal("--cluster-tag must be specified for AWS")
		}
		awsConfig += fmt.Sprintf("KubernetesClusterTag=%s\n", cloudConfig.ClusterTag)

		var err error
		cloudConfig.Provider, err = cloudprovider.GetCloudProvider(testContext.Provider, strings.NewReader(awsConfig))
		if err != nil {
			glog.Fatal("Error building AWS provider: ", err)
		}
	}

	// Disable skipped tests unless they are explicitly requested.
	if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
		config.GinkgoConfig.SkipString = "Skipped"
	}
	gomega.RegisterFailHandler(ginkgo.Fail)

	// Ensure all pods are running and ready before starting tests (otherwise,
	// cluster infrastructure pods that are being pulled or started can block
	// test pods from running, and tests that ensure all pods are running and
	// ready will fail).
	if err := waitForPodsRunningReady(api.NamespaceSystem, testContext.MinStartupPods, podStartupTimeout); err != nil {
		t.Errorf("Error waiting for all pods to be running and ready: %v", err)
		return
	}
	// Run tests through the Ginkgo runner with output to console + JUnit for Jenkins
	var r []ginkgo.Reporter
	if *reportDir != "" {
		r = append(r, reporters.NewJUnitReporter(path.Join(*reportDir, fmt.Sprintf("junit_%02d.xml", config.GinkgoConfig.ParallelNode))))
	}
	ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
}
Example 7
// getVolumeProvider returns the AWS Volumes interface
func (ebs *awsElasticBlockStore) getVolumeProvider() (aws_cloud.Volumes, error) {
	name := "aws"
	cloud, err := cloudprovider.GetCloudProvider(name, nil)
	if err != nil {
		return nil, err
	}
	volumes, ok := cloud.(aws_cloud.Volumes)
	if !ok {
		return nil, fmt.Errorf("Cloud provider does not support volumes")
	}
	return volumes, nil
}
Example 8
// NewClusterManager creates a cluster manager for shared resources.
// - name: the name used to tag cluster-wide shared resources. This is the
//   string passed to glbc via --gce-cluster-name.
// - defaultBackendNodePort: the node port of glbc's default backend. This is
//   the Kubernetes Service that serves the 404 page if no URLs match.
// - defaultHealthCheckPath: the default path used for L7 health checks, e.g. "/healthz".
func NewClusterManager(name string, defaultBackendNodePort int64, defaultHealthCheckPath string) (*ClusterManager, error) {
	cloudInterface, err := cloudprovider.GetCloudProvider("gce", nil)
	if err != nil {
		return nil, err
	}
	cloud := cloudInterface.(*gce.GCECloud)
	cluster := ClusterManager{ClusterName: name}

	// Why do we need so many defaults?
	// Default IG: We add all instances to a single ig, and
	// every service that requires loadbalancing opens up
	// a nodePort on the cluster, which translates to a node
	// on this default ig.
	//
	// Default Backend: We need a default backend to create
	// every urlmap, even if the user doesn't specify one.
	// This is the backend that gets requests if no paths match.
	// Note that this backend doesn't actually occupy a port
	// on the instance group.
	//
	// Default Health Check: The default backend used by an
	// Ingress that doesn't specify it.

	defaultIGName := defaultInstanceGroupName(name)
	if cluster.instancePool, err = NewNodePool(cloud, defaultIGName); err != nil {
		return nil, err
	}

	// TODO: We're round-tripping for a resource we just created.
	defaultIG, err := cluster.instancePool.Get(defaultIGName)
	if err != nil {
		return nil, err
	}
	if cluster.healthChecker, err = NewHealthChecker(
		cloud, defaultHttpHealthCheck, defaultHealthCheckPath); err != nil {
		return nil, err
	}
	defaultHc, err := cluster.healthChecker.Get(defaultHttpHealthCheck)
	if err != nil {
		return nil, err
	}
	if cluster.backendPool, err = NewBackendPool(
		cloud, defaultBackendNodePort, defaultIG, defaultHc, cloud); err != nil {
		return nil, err
	}
	cluster.defaultBackendNodePort = defaultBackendNodePort
	// TODO: Don't cast, the problem here is the default backend doesn't have
	// a port and the interface only allows backend access via port.
	cluster.l7Pool = NewLoadBalancerPool(
		cloud, cluster.backendPool.(*Backends).defaultBackend)
	return &cluster, nil
}
Example 9
// Attaches the specified persistent disk device to node, verifies that it is attached, and retries if it fails.
func attachDiskAndVerify(b *gcePersistentDiskBuilder, sdBeforeSet sets.String) (string, error) {
	devicePaths := getDiskByIdPaths(b.gcePersistentDisk)
	var gce cloudprovider.Interface
	for numRetries := 0; numRetries < maxRetries; numRetries++ {
		if gce == nil {
			var err error
			gce, err = cloudprovider.GetCloudProvider("gce", nil)
			if err != nil || gce == nil {
				// Retry on error. See issue #11321
				glog.Errorf("Error getting GCECloudProvider while attaching PD %q: %v", b.pdName, err)
				gce = nil
				time.Sleep(errorSleepDuration)
				continue
			}
		}

		if numRetries > 0 {
			glog.Warningf("Timed out waiting for GCE PD %q to attach. Retrying attach.", b.pdName)
		}

		if err := gce.(*gce_cloud.GCECloud).AttachDisk(b.pdName, b.readOnly); err != nil {
			// Retry on error. See issue #11321. Continue and verify if disk is attached, because a
			// previous attach operation may still succeed.
			glog.Errorf("Error attaching PD %q: %v", b.pdName, err)
		}

		for numChecks := 0; numChecks < maxChecks; numChecks++ {
			if err := udevadmChangeToNewDrives(sdBeforeSet); err != nil {
				// udevadm errors should not block disk attachment, log and continue
				glog.Errorf("%v", err)
			}

			for _, path := range devicePaths {
				if pathExists, err := pathExists(path); err != nil {
					// Retry on error. See issue #11321
					glog.Errorf("Error checking if path exists: %v", err)
				} else if pathExists {
					// A device path has successfully been created for the PD
					glog.Infof("Successfully attached GCE PD %q.", b.pdName)
					return path, nil
				}
			}

			// Sleep then check again
			glog.V(3).Infof("Waiting for GCE PD %q to attach.", b.pdName)
			time.Sleep(checkSleepDuration)
		}
	}

	return "", fmt.Errorf("Could not attach GCE PD %q. Timeout waiting for mount paths to be created.", b.pdName)
}
Example 10
// setupProviderConfig validates and sets up cloudConfig based on testContext.Provider.
func setupProviderConfig() error {
	switch testContext.Provider {
	case "":
		glog.Info("The --provider flag is not set.  Treating as a conformance test.  Some tests may not be run.")

	case "gce", "gke":
		var err error
		Logf("Fetching cloud provider for %q\r\n", testContext.Provider)
		var tokenSource oauth2.TokenSource
		if cloudConfig.ServiceAccount != "" {
			// Use specified service account for auth
			Logf("Using service account %q as token source.", cloudConfig.ServiceAccount)
			tokenSource = google.ComputeTokenSource(cloudConfig.ServiceAccount)
		}
		zone := testContext.CloudConfig.Zone
		region, err := gcecloud.GetGCERegion(zone)
		if err != nil {
			return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
		}
		managedZones := []string{zone} // Only single-zone for now
		cloudConfig.Provider, err = gcecloud.CreateGCECloud(testContext.CloudConfig.ProjectID, region, zone, managedZones, "" /* networkUrl */, tokenSource, false /* useMetadataServer */)
		if err != nil {
			return fmt.Errorf("Error building GCE/GKE provider: %v", err)
		}

	case "aws":
		awsConfig := "[Global]\n"
		if cloudConfig.Zone == "" {
			return fmt.Errorf("gce-zone must be specified for AWS")
		}
		awsConfig += fmt.Sprintf("Zone=%s\n", cloudConfig.Zone)

		if cloudConfig.ClusterTag == "" {
			return fmt.Errorf("--cluster-tag must be specified for AWS")
		}
		awsConfig += fmt.Sprintf("KubernetesClusterTag=%s\n", cloudConfig.ClusterTag)

		var err error
		cloudConfig.Provider, err = cloudprovider.GetCloudProvider(testContext.Provider, strings.NewReader(awsConfig))
		if err != nil {
			return fmt.Errorf("Error building AWS provider: %v", err)
		}

	}

	return nil
}
Example 11
// getGCECloudProvider returns the GCE cloud provider, for use for querying volume labels
func (l *persistentVolumeLabel) getGCECloudProvider() (*gce.GCECloud, error) {
	l.mutex.Lock()
	defer l.mutex.Unlock()

	if l.gceCloudProvider == nil {
		cloudProvider, err := cloudprovider.GetCloudProvider("gce", nil)
		if err != nil || cloudProvider == nil {
			return nil, err
		}
		gceCloudProvider, ok := cloudProvider.(*gce.GCECloud)
		if !ok {
			// GetCloudProvider has gone very wrong
			return nil, fmt.Errorf("error retrieving GCE cloud provider")
		}
		l.gceCloudProvider = gceCloudProvider
	}
	return l.gceCloudProvider, nil
}
Example 12
// getEBSVolumes returns the AWS Volumes interface for ebs
func (l *persistentVolumeLabel) getEBSVolumes() (aws.Volumes, error) {
	l.mutex.Lock()
	defer l.mutex.Unlock()

	if l.ebsVolumes == nil {
		cloudProvider, err := cloudprovider.GetCloudProvider("aws", nil)
		if err != nil || cloudProvider == nil {
			return nil, err
		}
		awsCloudProvider, ok := cloudProvider.(*aws.AWSCloud)
		if !ok {
			// GetCloudProvider has gone very wrong
			return nil, fmt.Errorf("error retrieving AWS cloud provider")
		}
		l.ebsVolumes = awsCloudProvider
	}
	return l.ebsVolumes, nil
}
Example 13
// NewClusterManager creates a cluster manager for shared resources.
// - name: the name used to tag cluster-wide shared resources. This is the
//   string passed to glbc via --gce-cluster-name.
// - defaultBackendNodePort: the node port of glbc's default backend. This is
//   the Kubernetes Service that serves the 404 page if no URLs match.
// - defaultHealthCheckPath: the default path used for L7 health checks, e.g. "/healthz".
func NewClusterManager(
	name string,
	defaultBackendNodePort int64,
	defaultHealthCheckPath string) (*ClusterManager, error) {

	cloudInterface, err := cloudprovider.GetCloudProvider("gce", nil)
	if err != nil {
		return nil, err
	}
	cloud := cloudInterface.(*gce.GCECloud)
	cluster := ClusterManager{ClusterName: name}
	cluster.instancePool = NewNodePool(cloud)
	healthChecker := NewHealthChecker(cloud, defaultHealthCheckPath)
	cluster.backendPool = NewBackendPool(
		cloud, healthChecker, cluster.instancePool)
	defaultBackendHealthChecker := NewHealthChecker(cloud, "/healthz")
	defaultBackendPool := NewBackendPool(
		cloud, defaultBackendHealthChecker, cluster.instancePool)
	cluster.defaultBackendNodePort = defaultBackendNodePort
	cluster.l7Pool = NewLoadBalancerPool(
		cloud, defaultBackendPool, defaultBackendNodePort)
	return &cluster, nil
}
Example 14
// Enabled implements DockerConfigProvider.Enabled for the AWS token-based implementation.
// For now, it gets activated only if AWS was chosen as the cloud provider.
// TODO: figure out how to enable it manually for deployments that are not on
// AWS but still use ECR somehow?
func (p *ecrProvider) Enabled() bool {
	provider, err := cloudprovider.GetCloudProvider(aws_cloud.ProviderName, nil)
	if err != nil {
		glog.Errorf("while initializing AWS cloud provider %v", err)
		return false
	}
	if provider == nil {
		return false
	}

	zones, ok := provider.Zones()
	if !ok {
		glog.Errorf("couldn't get Zones() interface")
		return false
	}
	zone, err := zones.GetZone()
	if err != nil {
		glog.Errorf("while getting zone %v", err)
		return false
	}
	if zone.Region == "" {
		glog.Errorf("Region information is empty")
		return false
	}

	getter := &ecrTokenGetter{svc: ecr.New(session.New(&aws.Config{
		Credentials: nil,
		Region:      &zone.Region,
	}))}
	getter.svc.Handlers.Sign.PushFrontNamed(request.NamedHandler{
		Name: "k8s/logger",
		Fn:   awsHandlerLogger,
	})
	p.getter = getter

	return true
}
Example 15
// Detaches the specified persistent disk device from node, verifies that it is detached, and retries if it fails.
// This function is intended to be called asynchronously as a go routine.
// It starts the detachCleanupManager with the specified pdName so that callers can wait for completion.
func detachDiskAndVerify(c *gcePersistentDiskCleaner) {
	glog.V(5).Infof("detachDiskAndVerify for pd %q.", c.pdName)
	defer util.HandleCrash()

	// Start operation, so that other threads can wait on this detach operation.
	// Set bufferSize to 0 so senders are blocked on send until we receive.
	ch, err := detachCleanupManager.Start(c.pdName, 0 /* bufferSize */)
	if err != nil {
		glog.Errorf("Error adding %q to detachCleanupManager: %v", c.pdName, err)
		return
	}

	defer detachCleanupManager.Close(c.pdName)

	defer func() {
		// Unblock any callers that have been waiting for this detach routine to complete.
		for {
			select {
			case <-ch:
				glog.V(5).Infof("detachDiskAndVerify for pd %q clearing chan.", c.pdName)
			default:
				glog.V(5).Infof("detachDiskAndVerify for pd %q done clearing chans.", c.pdName)
				return
			}
		}
	}()

	devicePaths := getDiskByIdPaths(c.gcePersistentDisk)
	var gce cloudprovider.Interface
	for numRetries := 0; numRetries < maxRetries; numRetries++ {
		if gce == nil {
			var err error
			gce, err = cloudprovider.GetCloudProvider("gce", nil)
			if err != nil || gce == nil {
				// Retry on error. See issue #11321
				glog.Errorf("Error getting GCECloudProvider while detaching PD %q: %v", c.pdName, err)
				gce = nil
				time.Sleep(errorSleepDuration)
				continue
			}
		}

		if numRetries > 0 {
			glog.Warningf("Timed out waiting for GCE PD %q to detach. Retrying detach.", c.pdName)
		}

		if err := gce.(*gce_cloud.GCECloud).DetachDisk(c.pdName); err != nil {
			// Retry on error. See issue #11321. Continue and verify if disk is detached, because a
			// previous detach operation may still succeed.
			glog.Errorf("Error detaching PD %q: %v", c.pdName, err)
		}

		for numChecks := 0; numChecks < maxChecks; numChecks++ {
			allPathsRemoved := true
			for _, path := range devicePaths {
				if err := udevadmChangeToDrive(path); err != nil {
					// udevadm errors should not block disk detachment, log and continue
					glog.Errorf("%v", err)
				}
				if exists, err := pathExists(path); err != nil {
					// Retry on error. See issue #11321
					glog.Errorf("Error checking if path exists: %v", err)
				} else {
					allPathsRemoved = allPathsRemoved && !exists
				}
			}
			if allPathsRemoved {
				// All paths to the PD have been successfully removed
				glog.Infof("Successfully detached GCE PD %q.", c.pdName)
				return
			}

			// Sleep then check again
			glog.V(3).Infof("Waiting for GCE PD %q to detach.", c.pdName)
			time.Sleep(checkSleepDuration)
		}

	}

	glog.Errorf("Failed to detach GCE PD %q. One or more mount paths was not removed.", c.pdName)
}
Example 16
func TestE2E(t *testing.T) {
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()
	if *reportDir != "" {
		if err := os.MkdirAll(*reportDir, 0755); err != nil {
			glog.Errorf("Failed creating report directory: %v", err)
		}
		defer CoreDump(*reportDir)
	}

	if testContext.Provider == "" {
		glog.Info("The --provider flag is not set.  Treating as a conformance test.  Some tests may not be run.")
	}

	if testContext.Provider == "gce" || testContext.Provider == "gke" {
		var err error
		Logf("Fetching cloud provider for %q\r\n", testContext.Provider)
		var tokenSource oauth2.TokenSource
		if cloudConfig.ServiceAccount != "" {
			// Use specified service account for auth
			Logf("Using service account %q as token source.", cloudConfig.ServiceAccount)
			tokenSource = google.ComputeTokenSource(cloudConfig.ServiceAccount)
		}
		cloudConfig.Provider, err = gcecloud.CreateGCECloud(testContext.CloudConfig.ProjectID, testContext.CloudConfig.Zone, "" /* networkUrl */, tokenSource, false /* useMetadataServer */)
		if err != nil {
			glog.Fatal("Error building GCE provider: ", err)
		}

	}

	if testContext.Provider == "aws" {
		awsConfig := "[Global]\n"
		if cloudConfig.Zone == "" {
			glog.Fatal("gce-zone must be specified for AWS")
		}
		awsConfig += fmt.Sprintf("Zone=%s\n", cloudConfig.Zone)

		if cloudConfig.ClusterTag == "" {
			glog.Fatal("--cluster-tag must be specified for AWS")
		}
		awsConfig += fmt.Sprintf("KubernetesClusterTag=%s\n", cloudConfig.ClusterTag)

		var err error
		cloudConfig.Provider, err = cloudprovider.GetCloudProvider(testContext.Provider, strings.NewReader(awsConfig))
		if err != nil {
			glog.Fatal("Error building AWS provider: ", err)
		}
	}

	// Disable skipped tests unless they are explicitly requested.
	if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
		config.GinkgoConfig.SkipString = `\[Skipped\]`
	}
	gomega.RegisterFailHandler(ginkgo.Fail)

	c, err := loadClient()
	if err != nil {
		glog.Fatal("Error loading client: ", err)
	}

	// Delete any namespaces except default and kube-system. This ensures no
	// lingering resources are left over from a previous test run.
	if testContext.CleanStart {
		deleted, err := deleteNamespaces(c, nil /* deleteFilter */, []string{api.NamespaceSystem, api.NamespaceDefault})
		if err != nil {
			t.Errorf("Error deleting orphaned namespaces: %v", err)
		}
		glog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
		if err := waitForNamespacesDeleted(c, deleted, namespaceCleanupTimeout); err != nil {
			glog.Fatalf("Failed to delete orphaned namespaces %v: %v", deleted, err)
		}
	}

	// Ensure all pods are running and ready before starting tests (otherwise,
	// cluster infrastructure pods that are being pulled or started can block
	// test pods from running, and tests that ensure all pods are running and
	// ready will fail).
	if err := waitForPodsRunningReady(api.NamespaceSystem, testContext.MinStartupPods, podStartupTimeout); err != nil {
		t.Errorf("Error waiting for all pods to be running and ready: %v", err)
		return
	}
	// Run tests through the Ginkgo runner with output to console + JUnit for Jenkins
	var r []ginkgo.Reporter
	if *reportDir != "" {
		r = append(r, reporters.NewJUnitReporter(path.Join(*reportDir, fmt.Sprintf("junit_%02d.xml", config.GinkgoConfig.ParallelNode))))
	}
	glog.Infof("Starting e2e run; %q", runId)
	ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
}