Example #1
func TestOVirtCloudConfiguration(t *testing.T) {
	config1 := (io.Reader)(nil)

	_, err1 := cloudprovider.GetCloudProvider("ovirt", config1)
	if err1 == nil {
		t.Fatalf("An error is expected when the configuration is missing")
	}

	config2 := strings.NewReader("")

	_, err2 := cloudprovider.GetCloudProvider("ovirt", config2)
	if err2 == nil {
		t.Fatalf("An error is expected when the configuration is empty")
	}

	config3 := strings.NewReader(`
[connection]
	`)

	_, err3 := cloudprovider.GetCloudProvider("ovirt", config3)
	if err3 == nil {
		t.Fatalf("An error is expected when the uri is missing")
	}

	config4 := strings.NewReader(`
[connection]
uri = https://localhost:8443/ovirt-engine/api
`)

	_, err4 := cloudprovider.GetCloudProvider("ovirt", config4)
	if err4 != nil {
		t.Fatalf("Unexpected error creating the provider: %s", err4)
	}
}
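All of these examples go through the same factory registry. Below is a minimal, self-contained sketch of the pattern behind cloudprovider.GetCloudProvider, assuming the pkg/cloudprovider API of this era; the names Interface, Factory, and RegisterCloudProvider mirror that package, everything else is illustrative. It also shows why an unregistered provider name can yield (nil, nil), which is what the nil checks in Examples #4 and #7 guard against.

package cloudprovider

import (
	"io"
	"sync"
)

// Interface stands in for the full cloudprovider.Interface in this sketch.
type Interface interface {
	ProviderName() string
}

// Factory mirrors cloudprovider.Factory: it builds a provider from an
// optional configuration stream (nil when no config file was given).
type Factory func(config io.Reader) (Interface, error)

var (
	providersMutex sync.Mutex
	providers      = make(map[string]Factory)
)

// RegisterCloudProvider is typically called from a provider package's init().
func RegisterCloudProvider(name string, factory Factory) {
	providersMutex.Lock()
	defer providersMutex.Unlock()
	providers[name] = factory
}

// GetCloudProvider invokes the registered factory. An unregistered name
// returns (nil, nil), so callers must check the returned Interface as well
// as the error.
func GetCloudProvider(name string, config io.Reader) (Interface, error) {
	providersMutex.Lock()
	factory, found := providers[name]
	providersMutex.Unlock()
	if !found {
		return nil, nil
	}
	return factory(config)
}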
Example #2
// Unmounts the device and detaches the disk from the kubelet's host machine.
// Expects a GCE device path symlink. Ex: /dev/disk/by-id/google-mydisk-part1
func (util *GCEDiskUtil) DetachDisk(pd *gcePersistentDisk, devicePath string) error {
	// Follow the symlink to the actual device path.
	canonicalDevicePath, err := filepath.EvalSymlinks(devicePath)
	if err != nil {
		return err
	}
	deviceName, err := getDeviceName(devicePath, canonicalDevicePath)
	if err != nil {
		return err
	}
	globalPDPath := makeGlobalPDName(pd.plugin.host, deviceName, pd.readOnly)
	if err := pd.mounter.Unmount(globalPDPath, 0); err != nil {
		return err
	}
	if err := os.RemoveAll(globalPDPath); err != nil {
		return err
	}
	gce, err := cloudprovider.GetCloudProvider("gce", nil)
	if err != nil {
		return err
	}
	if err := gce.(*gce_cloud.GCECloud).DetachDisk(deviceName); err != nil {
		return err
	}
	return nil
}
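The snippet calls a getDeviceName helper that is not shown. Here is a plausible reconstruction based only on the symlink layout named in the comment (google-<name>[-part<N>] under /dev/disk/by-id); this is purely an assumption, and the real helper may differ.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// getDeviceName recovers the PD name from a /dev/disk/by-id symlink,
// e.g. /dev/disk/by-id/google-mydisk-part1 -> "mydisk". Hypothetical
// reconstruction; the canonical path is accepted for signature parity.
func getDeviceName(devicePath, canonicalDevicePath string) (string, error) {
	base := filepath.Base(devicePath)
	if !strings.HasPrefix(base, "google-") {
		return "", fmt.Errorf("unexpected device path %q", devicePath)
	}
	name := strings.TrimPrefix(base, "google-")
	if i := strings.LastIndex(name, "-part"); i >= 0 {
		name = name[:i]
	}
	return name, nil
}

func main() {
	name, err := getDeviceName("/dev/disk/by-id/google-mydisk-part1", "/dev/sdb1")
	fmt.Println(name, err)
}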
Example #3
// Unmounts the device and detaches the disk from the kubelet's host machine.
func (util *GCEDiskUtil) DetachDisk(pd *gcePersistentDisk) error {
	// Unmount the global PD mount, which should be the only one.
	globalPDPath := makeGlobalPDName(pd.plugin.host, pd.pdName)
	glog.V(5).Infof("DetachDisk(pd) where pd is %#v and the globalPDPath is %q\r\n", pd, globalPDPath)

	// Terminate any in-progress verify-detach goroutines; this blocks until the goroutine is ready to exit because the channel is unbuffered.
	detachCleanupManager.Send(pd.pdName, true)

	if err := pd.mounter.Unmount(globalPDPath); err != nil {
		return err
	}
	if err := os.Remove(globalPDPath); err != nil {
		return err
	}
	// Detach the disk
	gce, err := cloudprovider.GetCloudProvider("gce", nil)
	if err != nil {
		return err
	}
	if err := gce.(*gce_cloud.GCECloud).DetachDisk(pd.pdName); err != nil {
		return err
	}

	// Verify disk detached, retry if needed.
	go verifyDetached(pd, gce)
	return nil
}
Example #4
func initCloudProvider(name string, configFilePath string) cloudprovider.Interface {
	var config *os.File

	if name == "" {
		glog.Info("No cloud provider specified.")
		return nil
	}

	if configFilePath != "" {
		var err error

		config, err = os.Open(configFilePath)
		if err != nil {
			glog.Fatalf("Couldn't open cloud provider configuration %s: %#v",
				configFilePath, err)
		}

		defer config.Close()
	}

	cloud, err := cloudprovider.GetCloudProvider(name, config)
	if err != nil {
		glog.Fatalf("Couldn't init cloud provider %q: %#v", name, err)
	}
	if cloud == nil {
		glog.Fatalf("Unknown cloud provider: %s", name)
	}

	return cloud
}
Example #5
func main() {
	util.InitFlags()
	goruntime.GOMAXPROCS(goruntime.NumCPU())
	if context.Provider == "" {
		glog.Error("The --provider flag is not set; it is required.")
		os.Exit(1)
	}
	if *times <= 0 {
		glog.Error("Invalid --times (negative or no testing requested)!")
		os.Exit(1)
	}

	if context.Provider == "aws" {
		awsConfig := "[Global]\n"
		if cloudConfig.Zone == "" {
			glog.Error("gce_zone must be specified for AWS")
			os.Exit(1)
		}
		awsConfig += fmt.Sprintf("Zone=%s\n", cloudConfig.Zone)

		var err error
		cloudConfig.Provider, err = cloudprovider.GetCloudProvider(context.Provider, strings.NewReader(awsConfig))
		if err != nil {
			glog.Error("Error building AWS provider: ", err)
			os.Exit(1)
		}
	}

	e2e.RunE2ETests(context, *orderseed, *times, *reportDir, testList)
}
Example #6
func TestE2E(t *testing.T) {
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	// TODO: possibly clean up or refactor this functionality.
	if testContext.Provider == "" {
		glog.Fatal("The --provider flag is not set.  Treating as a conformance test.  Some tests may not be run.")
	}

	if testContext.Provider == "aws" {
		awsConfig := "[Global]\n"
		if cloudConfig.Zone == "" {
			glog.Fatal("gce-zone must be specified for AWS")
		}
		awsConfig += fmt.Sprintf("Zone=%s\n", cloudConfig.Zone)

		if cloudConfig.ClusterTag == "" {
			glog.Fatal("--cluster-tag must be specified for AWS")
		}
		awsConfig += fmt.Sprintf("KubernetesClusterTag=%s\n", cloudConfig.ClusterTag)

		var err error
		cloudConfig.Provider, err = cloudprovider.GetCloudProvider(testContext.Provider, strings.NewReader(awsConfig))
		if err != nil {
			glog.Fatal("Error building AWS provider: ", err)
		}
	}

	// Disable density test unless it's explicitly requested.
	if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
		config.GinkgoConfig.SkipString = "Skipped"
	}
	gomega.RegisterFailHandler(ginkgo.Fail)

	// Ensure all pods are running and ready before starting tests (otherwise,
	// cluster infrastructure pods that are being pulled or started can block
	// test pods from running, and tests that ensure all pods are running and
	// ready will fail).
	if err := waitForPodsRunningReady(api.NamespaceDefault, testContext.MinStartupPods, podStartupTimeout); err != nil {
		glog.Fatalf("Error waiting for all pods to be running and ready: %v", err)
	}
	// Run tests through the Ginkgo runner with output to console + JUnit for Jenkins
	var r []ginkgo.Reporter
	if *reportDir != "" {
		r = append(r, reporters.NewJUnitReporter(path.Join(*reportDir, fmt.Sprintf("junit_%02d.xml", config.GinkgoConfig.ParallelNode))))
		failReport := &failReporter{}
		r = append(r, failReport)
		defer func() {
			if failReport.failed {
				coreDump(*reportDir)
			}
		}()
	}
	ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
}
Example #7
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()
	verifyMinionFlags()

	if len(etcdServerList) == 0 {
		glog.Fatalf("-etcd_servers flag is required.")
	}

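	// Note: unlike the other examples on this page, this older snippet
	// calls GetCloudProvider with only the provider name; there is no
	// config argument in this version of the API.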
	cloud, err := cloudprovider.GetCloudProvider(*cloudProvider)
	if err != nil {
		glog.Fatalf("Couldn't init cloud provider %q: %#v", *cloudProvider, err)
	}
	if cloud == nil {
		if len(*cloudProvider) > 0 {
			glog.Fatalf("Unknown cloud provider: %s", *cloudProvider)
		} else {
			glog.Info("No cloud provider specified.")
		}
	}

	podInfoGetter := &client.HTTPPodInfoGetter{
		Client: http.DefaultClient,
		Port:   *minionPort,
	}

	client, err := client.New(net.JoinHostPort(*address, strconv.Itoa(int(*port))), nil)
	if err != nil {
		glog.Fatalf("Invalid server address: %v", err)
	}

	m := master.New(&master.Config{
		Client:             client,
		Cloud:              cloud,
		EtcdServers:        etcdServerList,
		HealthCheckMinions: *healthCheckMinions,
		Minions:            machineList,
		MinionCacheTTL:     *minionCacheTTL,
		MinionRegexp:       *minionRegexp,
		PodInfoGetter:      podInfoGetter,
	})

	storage, codec := m.API_v1beta1()
	s := &http.Server{
		Addr:           net.JoinHostPort(*address, strconv.Itoa(int(*port))),
		Handler:        apiserver.Handle(storage, codec, *apiPrefix),
		ReadTimeout:    5 * time.Minute,
		WriteTimeout:   5 * time.Minute,
		MaxHeaderBytes: 1 << 20,
	}
	glog.Fatal(s.ListenAndServe())
}
Example #8
// Attaches a disk specified by a volume.GCEPersistentDisk to the current kubelet.
// Mounts the disk to its global path.
func (util *GCEDiskUtil) AttachDisk(pd *gcePersistentDisk) error {
	gce, err := cloudprovider.GetCloudProvider("gce", nil)
	if err != nil {
		return err
	}
	flags := uintptr(0)
	if pd.readOnly {
		flags = mount.FlagReadOnly
	}
	if err := gce.(*gce_cloud.GCECloud).AttachDisk(pd.pdName, pd.readOnly); err != nil {
		return err
	}
	devicePath := path.Join("/dev/disk/by-id/", "google-"+pd.pdName)
	if pd.partition != "" {
		devicePath = devicePath + "-part" + pd.partition
	}
	// TODO(jonesdl): There should probably be a better method than busy-waiting here.
	numTries := 0
	for {
		_, err := os.Stat(devicePath)
		if err == nil {
			break
		}
		if err != nil && !os.IsNotExist(err) {
			return err
		}
		numTries++
		if numTries == 10 {
			return errors.New("Could not attach disk: Timeout after 10s")
		}
		time.Sleep(time.Second)
	}
	globalPDPath := makeGlobalPDName(pd.plugin.host, pd.pdName, pd.readOnly)
	// Only mount the PD globally once.
	mountpoint, err := isMountPoint(globalPDPath)
	if err != nil {
		if os.IsNotExist(err) {
			if err := os.MkdirAll(globalPDPath, 0750); err != nil {
				return err
			}
			mountpoint = false
		} else {
			return err
		}
	}
	if !mountpoint {
		err = pd.mounter.Mount(devicePath, globalPDPath, pd.fsType, flags, "")
		if err != nil {
			os.RemoveAll(globalPDPath)
			return err
		}
	}
	return nil
}
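The busy-wait above recurs almost verbatim in Examples #11 and #14, so it is a natural candidate for a small helper. Here is a hedged sketch under the same one-second, ten-try budget; waitForDevicePath is a hypothetical name, not part of the original plugin.

package main

import (
	"fmt"
	"os"
	"time"
)

// waitForDevicePath polls until the device symlink appears or the timeout
// elapses, mirroring the loop in AttachDisk above.
func waitForDevicePath(devicePath string, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		if _, err := os.Stat(devicePath); err == nil {
			return nil
		} else if !os.IsNotExist(err) {
			return err
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("could not attach disk: timeout after %v waiting for %s", timeout, devicePath)
		}
		time.Sleep(interval)
	}
}

func main() {
	// Equivalent to the original ten one-second checks.
	if err := waitForDevicePath("/dev/disk/by-id/google-mydisk", time.Second, 10*time.Second); err != nil {
		fmt.Println(err)
	}
}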
Example #9
File: aws_ebs.go Project: Ima8/kubernetes
// getVolumeProvider returns the AWS Volumes interface
func (ebs *awsElasticBlockStore) getVolumeProvider() (aws_cloud.Volumes, error) {
	name := "aws"
	cloud, err := cloudprovider.GetCloudProvider(name, nil)
	if err != nil {
		return nil, err
	}
	volumes, ok := cloud.(aws_cloud.Volumes)
	if !ok {
		return nil, fmt.Errorf("Cloud provider does not support volumes")
	}
	return volumes, nil
}
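getVolumeProvider narrows the generic provider to a capability-specific interface with a type assertion, the same move the GCE examples make with gce.(*gce_cloud.GCECloud). Below is a self-contained sketch of that pattern; every name here is a hypothetical stand-in, not the real aws_cloud API.

package main

import "fmt"

// Cloud stands in for cloudprovider.Interface.
type Cloud interface {
	ProviderName() string
}

// Volumes is a capability interface analogous to aws_cloud.Volumes;
// only providers that support block volumes implement it.
type Volumes interface {
	AttachDisk(volumeName string) error
}

type fakeAWS struct{}

func (c *fakeAWS) ProviderName() string { return "aws" }

func (c *fakeAWS) AttachDisk(volumeName string) error {
	fmt.Println("attaching", volumeName)
	return nil
}

// volumesFor narrows the generic provider to the volume capability;
// providers that lack it simply fail the assertion.
func volumesFor(cloud Cloud) (Volumes, error) {
	volumes, ok := cloud.(Volumes)
	if !ok {
		return nil, fmt.Errorf("cloud provider %q does not support volumes", cloud.ProviderName())
	}
	return volumes, nil
}

func main() {
	volumes, err := volumesFor(&fakeAWS{})
	if err != nil {
		panic(err)
	}
	_ = volumes.AttachDisk("vol-0123")
}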
Example #10
// Attaches the specified persistent disk device to the node, verifies that it is attached, and retries if it fails.
func attachDiskAndVerify(pd *gcePersistentDisk, sdBeforeSet util.StringSet) (string, error) {
	devicePaths := getDiskByIdPaths(pd)
	var gce cloudprovider.Interface
	for numRetries := 0; numRetries < maxRetries; numRetries++ {
		if gce == nil {
			var err error
			gce, err = cloudprovider.GetCloudProvider("gce", nil)
			if err != nil || gce == nil {
				// Retry on error. See issue #11321
				glog.Errorf("Error getting GCECloudProvider while attaching PD %q: %v", pd.pdName, err)
				gce = nil
				time.Sleep(errorSleepDuration)
				continue
			}
		}

		if numRetries > 0 {
			glog.Warningf("Timed out waiting for GCE PD %q to attach. Retrying attach.", pd.pdName)
		}

		if err := gce.(*gce_cloud.GCECloud).AttachDisk(pd.pdName, pd.readOnly); err != nil {
			// Retry on error. See issue #11321. Continue and verify if disk is attached, because a
			// previous attach operation may still succeed.
			glog.Errorf("Error attaching PD %q: %v", pd.pdName, err)
		}

		for numChecks := 0; numChecks < maxChecks; numChecks++ {
			if err := udevadmChangeToNewDrives(sdBeforeSet); err != nil {
				// udevadm errors should not block disk attachment, log and continue
				glog.Errorf("%v", err)
			}

			for _, path := range devicePaths {
				if pathExists, err := pathExists(path); err != nil {
					// Retry on error. See issue #11321
					glog.Errorf("Error checking if path exists: %v", err)
				} else if pathExists {
					// A device path has successfully been created for the PD
					glog.Infof("Successfully attached GCE PD %q.", pd.pdName)
					return path, nil
				}
			}

			// Sleep then check again
			glog.V(3).Infof("Waiting for GCE PD %q to attach.", pd.pdName)
			time.Sleep(checkSleepDuration)
		}
	}

	return "", fmt.Errorf("Could not attach GCE PD %q. Timeout waiting for mount paths to be created.", pd.pdName)
}
Example #11
// Attaches a disk specified by a volume.GCEPersistentDisk to the current kubelet.
// Mounts the disk to its global path.
func (util *GCEDiskUtil) AttachDisk(GCEPD *GCEPersistentDisk) error {
	gce, err := cloudprovider.GetCloudProvider("gce", nil)
	if err != nil {
		return err
	}
	flags := uintptr(0)
	if GCEPD.ReadOnly {
		flags = MOUNT_MS_RDONLY
	}
	if err := gce.(*gce_cloud.GCECloud).AttachDisk(GCEPD.PDName, GCEPD.ReadOnly); err != nil {
		return err
	}
	devicePath := path.Join("/dev/disk/by-id/", "google-"+GCEPD.PDName)
	if GCEPD.Partition != "" {
		devicePath = devicePath + "-part" + GCEPD.Partition
	}
	// TODO(jonesdl): There should probably be a better method than busy-waiting here.
	numTries := 0
	for {
		_, err := os.Stat(devicePath)
		if err == nil {
			break
		}
		if err != nil && !os.IsNotExist(err) {
			return err
		}
		numTries++
		if numTries == 10 {
			return errors.New("Could not attach disk: Timeout after 10s")
		}
		time.Sleep(time.Second)
	}
	globalPDPath := makeGlobalPDName(GCEPD.RootDir, GCEPD.PDName, GCEPD.ReadOnly)
	// Only mount the PD globally once.
	_, err = os.Stat(globalPDPath)
	if os.IsNotExist(err) {
		err = os.MkdirAll(globalPDPath, 0750)
		if err != nil {
			return err
		}
		err = GCEPD.mounter.Mount(devicePath, globalPDPath, GCEPD.FSType, flags, "")
		if err != nil {
			os.RemoveAll(globalPDPath)
			return err
		}
	} else if err != nil {
		return err
	}
	return nil
}
Example #12
// Attaches a disk specified by a volume.GCEPersistentDisk to the current kubelet.
// Mounts the disk to its global path.
func (diskUtil *GCEDiskUtil) AttachAndMountDisk(pd *gcePersistentDisk, globalPDPath string) error {
	glog.V(5).Infof("AttachAndMountDisk(pd, %q) where pd is %#v\r\n", globalPDPath, pd)
	// Terminate any in-progress verify-detach goroutines; this blocks until the goroutine is ready to exit because the channel is unbuffered.
	detachCleanupManager.Send(pd.pdName, true)
	sdBefore, err := filepath.Glob(diskSDPattern)
	if err != nil {
		glog.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err)
	}
	sdBeforeSet := util.NewStringSet(sdBefore...)

	gce, err := cloudprovider.GetCloudProvider("gce", nil)
	if err != nil {
		return err
	}

	if err := gce.(*gce_cloud.GCECloud).AttachDisk(pd.pdName, pd.readOnly); err != nil {
		return err
	}

	devicePath, err := verifyAttached(pd, sdBeforeSet, gce)
	if err != nil {
		return err
	}

	// Only mount the PD globally once.
	mountpoint, err := pd.mounter.IsMountPoint(globalPDPath)
	if err != nil {
		if os.IsNotExist(err) {
			if err := os.MkdirAll(globalPDPath, 0750); err != nil {
				return err
			}
			mountpoint = false
		} else {
			return err
		}
	}
	options := []string{}
	if pd.readOnly {
		options = append(options, "ro")
	}
	if !mountpoint {
		err = pd.diskMounter.Mount(devicePath, globalPDPath, pd.fsType, options)
		if err != nil {
			os.Remove(globalPDPath)
			return err
		}
	}
	return nil
}
Example #13
File: gce_util.go Project: cjnygard/origin
// Unmounts the device and detaches the disk from the kubelet's host machine.
func (util *GCEDiskUtil) DetachDisk(pd *gcePersistentDisk) error {
	// Unmount the global PD mount, which should be the only one.
	globalPDPath := makeGlobalPDName(pd.plugin.host, pd.pdName)
	if err := pd.mounter.Unmount(globalPDPath); err != nil {
		return err
	}
	if err := os.Remove(globalPDPath); err != nil {
		return err
	}
	// Detach the disk
	gce, err := cloudprovider.GetCloudProvider("gce", nil)
	if err != nil {
		return err
	}
	if err := gce.(*gce_cloud.GCECloud).DetachDisk(pd.pdName); err != nil {
		return err
	}
	return nil
}
Example #14
File: gce_util.go Project: cjnygard/origin
// Attaches a disk specified by a volume.GCEPersistentDisk to the current kubelet.
// Mounts the disk to its global path.
func (util *GCEDiskUtil) AttachAndMountDisk(pd *gcePersistentDisk, globalPDPath string) error {
	gce, err := cloudprovider.GetCloudProvider("gce", nil)
	if err != nil {
		return err
	}
	if err := gce.(*gce_cloud.GCECloud).AttachDisk(pd.pdName, pd.readOnly); err != nil {
		return err
	}

	devicePaths := []string{
		path.Join("/dev/disk/by-id/", "google-"+pd.pdName),
		path.Join("/dev/disk/by-id/", "scsi-0Google_PersistentDisk_"+pd.pdName),
	}

	if pd.partition != "" {
		for i, path := range devicePaths {
			devicePaths[i] = path + "-part" + pd.partition
		}
	}
	// TODO(jonesdl): There should probably be a better method than busy-waiting here.
	numTries := 0
	devicePath := ""
	// Wait for the disk device to be created
	for {
		for _, path := range devicePaths {
			_, err := os.Stat(path)
			if err == nil {
				devicePath = path
				break
			}
			if err != nil && !os.IsNotExist(err) {
				return err
			}
		}
		if devicePath != "" {
			break
		}
		numTries++
		if numTries == 10 {
			return errors.New("Could not attach disk: Timeout after 10s")
		}
		time.Sleep(time.Second)
	}

	// Only mount the PD globally once.
	mountpoint, err := pd.mounter.IsMountPoint(globalPDPath)
	if err != nil {
		if os.IsNotExist(err) {
			if err := os.MkdirAll(globalPDPath, 0750); err != nil {
				return err
			}
			mountpoint = false
		} else {
			return err
		}
	}
	options := []string{}
	if pd.readOnly {
		options = append(options, "ro")
	}
	if !mountpoint {
		err = pd.diskMounter.Mount(devicePath, globalPDPath, pd.fsType, options)
		if err != nil {
			os.Remove(globalPDPath)
			return err
		}
	}
	return nil
}
Example #15
// Detaches the specified persistent disk device from the node, verifies that it is detached, and retries if it fails.
// This function is intended to be called asynchronously as a goroutine.
// It starts the detachCleanupManager with the specified pdName so that callers can wait for completion.
func detachDiskAndVerify(pd *gcePersistentDisk) {
	glog.V(5).Infof("detachDiskAndVerify for pd %q.", pd.pdName)
	defer util.HandleCrash()

	// Start operation, so that other threads can wait on this detach operation.
	// Set bufferSize to 0 so senders are blocked on send until we receive.
	ch, err := detachCleanupManager.Start(pd.pdName, 0 /* bufferSize */)
	if err != nil {
		glog.Errorf("Error adding %q to detachCleanupManager: %v", pd.pdName, err)
		return
	}

	defer detachCleanupManager.Close(pd.pdName)

	defer func() {
		// Unblock any callers that have been waiting for this detach routine to complete.
		for {
			select {
			case <-ch:
				glog.V(5).Infof("detachDiskAndVerify for pd %q clearing chan.", pd.pdName)
			default:
				glog.V(5).Infof("detachDiskAndVerify for pd %q done clearing chans.", pd.pdName)
				return
			}
		}
	}()

	devicePaths := getDiskByIdPaths(pd)
	var gce cloudprovider.Interface
	for numRetries := 0; numRetries < maxRetries; numRetries++ {
		if gce == nil {
			var err error
			gce, err = cloudprovider.GetCloudProvider("gce", nil)
			if err != nil || gce == nil {
				// Retry on error. See issue #11321
				glog.Errorf("Error getting GCECloudProvider while detaching PD %q: %v", pd.pdName, err)
				gce = nil
				time.Sleep(errorSleepDuration)
				continue
			}
		}

		if numRetries > 0 {
			glog.Warningf("Timed out waiting for GCE PD %q to detach. Retrying detach.", pd.pdName)
		}

		if err := gce.(*gce_cloud.GCECloud).DetachDisk(pd.pdName); err != nil {
			// Retry on error. See issue #11321. Continue and verify if disk is detached, because a
			// previous detach operation may still succeed.
			glog.Errorf("Error detaching PD %q: %v", pd.pdName, err)
		}

		for numChecks := 0; numChecks < maxChecks; numChecks++ {
			allPathsRemoved := true
			for _, path := range devicePaths {
				if err := udevadmChangeToDrive(path); err != nil {
					// udevadm errors should not block disk detachment, log and continue
					glog.Errorf("%v", err)
				}
				if exists, err := pathExists(path); err != nil {
					// Retry on error. See issue #11321
					glog.Errorf("Error checking if path exists: %v", err)
				} else {
					allPathsRemoved = allPathsRemoved && !exists
				}
			}
			if allPathsRemoved {
				// All paths to the PD have been successfully removed
				glog.Infof("Successfully detached GCE PD %q.", pd.pdName)
				return
			}

			// Sleep then check again
			glog.V(3).Infof("Waiting for GCE PD %q to detach.", pd.pdName)
			time.Sleep(checkSleepDuration)
		}

	}

	glog.Errorf("Failed to detach GCE PD %q. One or more mount paths was not removed.", pd.pdName)
}
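The detachCleanupManager used in Examples #3, #12, and #15 coordinates attach and detach through one channel per disk name: with bufferSize 0 the channel is unbuffered, so a Send from AttachAndMountDisk blocks until the in-flight detach goroutine receives it. Below is a minimal sketch of such a manager; the type and method names are assumptions inferred from the calls above, not the real implementation.

package main

import (
	"fmt"
	"sync"
	"time"
)

// opManager tracks one channel per disk name so callers can rendezvous
// with an in-flight detach goroutine.
type opManager struct {
	mu    sync.Mutex
	chans map[string]chan bool
}

func newOpManager() *opManager {
	return &opManager{chans: make(map[string]chan bool)}
}

// Start registers a channel for pdName; bufferSize 0 makes senders block
// until the detach goroutine receives (or drains the channel on exit).
func (m *opManager) Start(pdName string, bufferSize int) (chan bool, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if _, exists := m.chans[pdName]; exists {
		return nil, fmt.Errorf("operation for %q already in progress", pdName)
	}
	ch := make(chan bool, bufferSize)
	m.chans[pdName] = ch
	return ch, nil
}

// Send blocks on the unbuffered channel while a detach is in flight and
// is a no-op otherwise.
func (m *opManager) Send(pdName string, value bool) {
	m.mu.Lock()
	ch, exists := m.chans[pdName]
	m.mu.Unlock()
	if exists {
		ch <- value
	}
}

// Close unregisters the channel once the detach goroutine finishes.
func (m *opManager) Close(pdName string) {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.chans, pdName)
}

func main() {
	mgr := newOpManager()
	ch, _ := mgr.Start("mydisk", 0)
	go func() {
		defer mgr.Close("mydisk")
		time.Sleep(100 * time.Millisecond) // simulated detach work
		<-ch                               // drain a waiting sender
	}()
	mgr.Send("mydisk", true) // blocks until the goroutine receives
	fmt.Println("detach goroutine acknowledged")
}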