Example #1
// Detaches the specified persistent disk device from the node, verifies that it is detached, and retries if it fails.
// This function is intended to be called asynchronously as a goroutine.
func detachDiskAndVerify(c *gcePersistentDiskCleaner) {
	glog.V(5).Infof("detachDiskAndVerify(...) for pd %q. Will block for pending operations", c.pdName)
	defer util.HandleCrash()

	// Block execution until any pending attach/detach operations for this PD have completed
	attachDetachMutex.LockKey(c.pdName)
	defer attachDetachMutex.UnlockKey(c.pdName)

	glog.V(5).Infof("detachDiskAndVerify(...) for pd %q. Awake and ready to execute.", c.pdName)

	devicePaths := getDiskByIdPaths(c.gcePersistentDisk)
	var gceCloud *gcecloud.GCECloud
	for numRetries := 0; numRetries < maxRetries; numRetries++ {
		var err error
		if gceCloud == nil {
			gceCloud, err = getCloudProvider()
			if err != nil || gceCloud == nil {
				// Retry on error. See issue #11321
				glog.Errorf("Error getting GCECloudProvider while detaching PD %q: %v", c.pdName, err)
				time.Sleep(errorSleepDuration)
				continue
			}
		}

		if numRetries > 0 {
			glog.Warningf("Retrying detach for GCE PD %q (retry count=%v).", c.pdName, numRetries)
		}

		if err := gceCloud.DetachDisk(c.pdName, c.plugin.host.GetHostName()); err != nil {
			glog.Errorf("Error detaching PD %q: %v", c.pdName, err)
			time.Sleep(errorSleepDuration)
			continue
		}

		for numChecks := 0; numChecks < maxChecks; numChecks++ {
			allPathsRemoved, err := verifyAllPathsRemoved(devicePaths)
			if err != nil {
				// Log error, if any, and continue checking periodically.
				glog.Errorf("Error verifying GCE PD (%q) is detached: %v", c.pdName, err)
			} else if allPathsRemoved {
				// All paths to the PD have been successfully removed
				unmountPDAndRemoveGlobalPath(c)
				glog.Infof("Successfully detached GCE PD %q.", c.pdName)
				return
			}

			// Sleep then check again
			glog.V(3).Infof("Waiting for GCE PD %q to detach.", c.pdName)
			time.Sleep(checkSleepDuration)
		}

	}

	glog.Errorf("Failed to detach GCE PD %q. One or more mount paths was not removed.", c.pdName)
}
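
The LockKey/UnlockKey calls above serialize attach and detach work per disk name while letting different disks proceed in parallel. A minimal sketch of such a keyed mutex, assuming the real Kubernetes util type behaves roughly like this (names and layout here are an illustration, not the actual implementation):

import "sync"

// keyedMutex serializes operations that share a key (here, a PD name)
// while operations on different keys can run concurrently.
type keyedMutex struct {
	mu    sync.Mutex             // guards the locks map
	locks map[string]*sync.Mutex // one lock per PD name, created on demand
}

func (km *keyedMutex) LockKey(key string) {
	km.mu.Lock()
	if km.locks == nil {
		km.locks = make(map[string]*sync.Mutex)
	}
	l, ok := km.locks[key]
	if !ok {
		l = &sync.Mutex{}
		km.locks[key] = l
	}
	km.mu.Unlock()
	l.Lock() // blocks until any pending attach/detach for this PD finishes
}

func (km *keyedMutex) UnlockKey(key string) {
	km.mu.Lock()
	l := km.locks[key]
	km.mu.Unlock()
	l.Unlock()
}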
Example #2
// Attaches the specified persistent disk device to the node, verifies that it is attached, and retries if it fails.
func attachDiskAndVerify(b *gcePersistentDiskBuilder, sdBeforeSet sets.String) (string, error) {
	devicePaths := getDiskByIdPaths(b.gcePersistentDisk)
	var gceCloud *gce_cloud.GCECloud
	for numRetries := 0; numRetries < maxRetries; numRetries++ {
		// Block execution until any pending detach goroutines for this pd have completed
		detachCleanupManager.Send(b.pdName, true)

		var err error
		if gceCloud == nil {
			gceCloud, err = getCloudProvider()
			if err != nil || gceCloud == nil {
				// Retry on error. See issue #11321
				glog.Errorf("Error getting GCECloudProvider while detaching PD %q: %v", b.pdName, err)
				time.Sleep(errorSleepDuration)
				continue
			}
		}

		if numRetries > 0 {
			glog.Warningf("Timed out waiting for GCE PD %q to attach. Retrying attach.", b.pdName)
		}

		if err := gceCloud.AttachDisk(b.pdName, b.readOnly); err != nil {
			// Retry on error. See issue #11321.
			glog.Errorf("Error attaching PD %q: %v", b.pdName, err)
			time.Sleep(errorSleepDuration)
			continue
		}

		for numChecks := 0; numChecks < maxChecks; numChecks++ {
			path, err := verifyDevicePath(devicePaths, sdBeforeSet)
			if err != nil {
				// Log error, if any, and continue checking periodically. See issue #11321
				glog.Errorf("Error verifying GCE PD (%q) is attached: %v", b.pdName, err)
			} else if path != "" {
				// A device path has successfully been created for the PD
				glog.Infof("Successfully attached GCE PD %q.", b.pdName)
				return path, nil
			}

			// Sleep then check again
			glog.V(3).Infof("Waiting for GCE PD %q to attach.", b.pdName)
			time.Sleep(checkSleepDuration)
		}
	}

	return "", fmt.Errorf("Could not attach GCE PD %q. Timeout waiting for mount paths to be created.", b.pdName)
}
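
The verifyDevicePath helper is not shown in these examples. The real version also diffs the node's /dev/sd* entries against sdBeforeSet to catch devices that appear under unexpected names; the simplified sketch below only polls the expected by-id paths and is an assumption about its shape, not the actual helper:

import "os"

// verifyDevicePathSketch returns the first expected device path that now
// exists on the node, or "" if the disk has not appeared yet. The caller
// polls it in a loop, sleeping checkSleepDuration between attempts.
func verifyDevicePathSketch(devicePaths []string) (string, error) {
	for _, path := range devicePaths {
		if _, err := os.Stat(path); err == nil {
			return path, nil // by-id symlink exists: the attach is visible
		} else if !os.IsNotExist(err) {
			return "", err // unexpected error: report it and let the caller retry
		}
	}
	return "", nil // not attached yet; not an error
}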
Example #3
// Attaches the specified persistent disk device to the node, verifies that it is attached, and retries if it fails.
func attachDiskAndVerify(b *gcePersistentDiskMounter, sdBeforeSet sets.String) (string, error) {
	devicePaths := getDiskByIdPaths(b.gcePersistentDisk)
	var gceCloud *gcecloud.GCECloud
	for numRetries := 0; numRetries < maxRetries; numRetries++ {
		var err error
		if gceCloud == nil {
			gceCloud, err = getCloudProvider(b.gcePersistentDisk.plugin)
			if err != nil || gceCloud == nil {
				// Retry on error. See issue #11321
				glog.Errorf("Error getting GCECloudProvider while detaching PD %q: %v", b.pdName, err)
				time.Sleep(errorSleepDuration)
				continue
			}
		}

		if numRetries > 0 {
			glog.Warningf("Retrying attach for GCE PD %q (retry count=%v).", b.pdName, numRetries)
		}

		if err := gceCloud.AttachDisk(b.pdName, b.plugin.host.GetHostName(), b.readOnly); err != nil {
			glog.Errorf("Error attaching PD %q: %v", b.pdName, err)
			time.Sleep(errorSleepDuration)
			continue
		}

		for numChecks := 0; numChecks < maxChecks; numChecks++ {
			path, err := verifyDevicePath(devicePaths, sdBeforeSet)
			if err != nil {
				// Log error, if any, and continue checking periodically. See issue #11321
				glog.Errorf("Error verifying GCE PD (%q) is attached: %v", b.pdName, err)
			} else if path != "" {
				// A device path has successfully been created for the PD
				glog.Infof("Successfully attached GCE PD %q.", b.pdName)
				return path, nil
			}

			// Sleep then check again
			glog.V(3).Infof("Waiting for GCE PD %q to attach.", b.pdName)
			time.Sleep(checkSleepDuration)
		}
	}

	return "", fmt.Errorf("Could not attach GCE PD %q. Timeout waiting for mount paths to be created.", b.pdName)
}
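
The retry cadence in all of these functions comes from package constants that the snippets do not include. The values below are purely an illustration of their shape; the actual numbers in any given Kubernetes release are an assumption here:

import "time"

const (
	maxRetries         = 10              // attach/detach attempts before giving up
	maxChecks          = 10              // verification polls per attempt
	checkSleepDuration = 1 * time.Second // pause between verification polls
	errorSleepDuration = 5 * time.Second // back-off after a cloud API error
)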
Example #4
// Detaches the specified persistent disk device from the node, verifies that it is detached, and retries if it fails.
// This function is intended to be called asynchronously as a goroutine.
// It starts the detachCleanupManager with the specified pdName so that callers can wait for completion.
func detachDiskAndVerify(c *gcePersistentDiskCleaner) {
	glog.V(5).Infof("detachDiskAndVerify for pd %q.", c.pdName)
	defer util.HandleCrash()

	// Start operation, so that other threads can wait on this detach operation.
	// Set bufferSize to 0 so senders are blocked on send until we receive.
	ch, err := detachCleanupManager.Start(c.pdName, 0 /* bufferSize */)
	if err != nil {
		glog.Errorf("Error adding %q to detachCleanupManager: %v", c.pdName, err)
		return
	}

	defer detachCleanupManager.Close(c.pdName)

	defer func() {
		// Unblock any callers that have been waiting for this detach routine to complete.
		for {
			select {
			case <-ch:
				glog.V(5).Infof("detachDiskAndVerify for pd %q clearing chan.", c.pdName)
			default:
				glog.V(5).Infof("detachDiskAndVerify for pd %q done clearing chans.", c.pdName)
				return
			}
		}
	}()

	devicePaths := getDiskByIdPaths(c.gcePersistentDisk)
	var gceCloud *gce_cloud.GCECloud
	for numRetries := 0; numRetries < maxRetries; numRetries++ {
		var err error
		if gceCloud == nil {
			gceCloud, err = getCloudProvider()
			if err != nil || gceCloud == nil {
				// Retry on error. See issue #11321
				glog.Errorf("Error getting GCECloudProvider while detaching PD %q: %v", c.pdName, err)
				time.Sleep(errorSleepDuration)
				continue
			}
		}

		if numRetries > 0 {
			glog.Warningf("Timed out waiting for GCE PD %q to detach. Retrying detach.", c.pdName)
		}

		if err := gceCloud.DetachDisk(c.pdName); err != nil {
			// Retry on error. See issue #11321. Continue and verify if disk is detached, because a
			// previous detach operation may still succeed.
			glog.Errorf("Error detaching PD %q: %v", c.pdName, err)
		}

		for numChecks := 0; numChecks < maxChecks; numChecks++ {
			allPathsRemoved, err := verifyAllPathsRemoved(devicePaths)
			if err != nil {
				// Log error, if any, and continue checking periodically.
				glog.Errorf("Error verifying GCE PD (%q) is detached: %v", c.pdName, err)
			} else if allPathsRemoved {
				// All paths to the PD have been successfully removed
				glog.Infof("Successfully detached GCE PD %q.", c.pdName)
				return
			}

			// Sleep then check again
			glog.V(3).Infof("Waiting for GCE PD %q to detach.", c.pdName)
			time.Sleep(checkSleepDuration)
		}

	}

	glog.Errorf("Failed to detach GCE PD %q. One or more mount paths was not removed.", c.pdName)
}
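
The bufferSize of 0 passed to Start is what turns Send (used by the attach path in Example #2) into a rendezvous: on an unbuffered channel the sender blocks until this goroutine's deferred drain loop receives. A standalone sketch of that handshake using plain channels, with no Kubernetes types and illustrative names:

// waitForDetach is what an attach routine effectively does via
// detachCleanupManager.Send: the send blocks until the detach goroutine
// receives, so returning means the detach has finished (or never ran).
func waitForDetach(ch chan<- bool) {
	ch <- true
}

// drainWaiters mirrors the deferred loop above: it unblocks every
// pending sender, then returns once no more waiters are queued.
func drainWaiters(ch <-chan bool) {
	for {
		select {
		case <-ch:
			// released one blocked waiter
		default:
			return
		}
	}
}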
Example #5
const (
	firewallTimeoutDefault = 3 * time.Minute
	firewallTestTcpTimeout = 1 * time.Second
	// Set ports outside of 30000-32767, 80 and 8080 to avoid being whitelisted by the e2e cluster
	firewallTestHttpPort = int32(29999)
	firewallTestUdpPort  = int32(29998)
)
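
// The port choices above matter: 29999 and 29998 sit outside the NodePort
// range (30000-32767) and away from 80/8080, so pre-existing e2e cluster
// firewall allowances cannot mask the rules this test creates.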

var _ = framework.KubeDescribe("Firewall rule", func() {
	var firewallTestName = "firewall-test"
	f := framework.NewDefaultFramework(firewallTestName)

	var cs clientset.Interface
	var cloudConfig framework.CloudConfig
	var gceCloud *gcecloud.GCECloud

	BeforeEach(func() {
		framework.SkipUnlessProviderIs("gce")
		cs = f.ClientSet
		cloudConfig = framework.TestContext.CloudConfig
		gceCloud = cloudConfig.Provider.(*gcecloud.GCECloud)
	})

	// This test takes around 4 minutes to run
	It("[Slow] [Serial] should create valid firewall rules for LoadBalancer type service", func() {
		ns := f.Namespace.Name
		// These source ranges are only used to verify that the LB firewall rules contain exactly the same entries
		firewallTestSourceRanges := []string{"0.0.0.0/1", "128.0.0.0/1"}
		serviceName := "firewall-test-loadbalancer"