// Populates data struct with one volume/node1 entry.
// Calls VolumeNodeExists() with the same volume and node2.
// Verifies the requested entry does not exist, but the populated entry does.
func Test_VolumeNodeExists_Positive_VolumeExistsNodeDoesntExist(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := v1.UniqueVolumeName("volume-name")
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
	node1Name := types.NodeName("node1-name")
	node2Name := types.NodeName("node2-name")
	devicePath := "fake/device/path"
	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, node1Name, devicePath)
	if addErr != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
	}

	// Act
	volumeNodeComboExists := asw.VolumeNodeExists(generatedVolumeName, node2Name)

	// Assert
	if volumeNodeComboExists {
		t.Fatalf("%q/%q volume/node combo exists, it should not.", generatedVolumeName, node2Name)
	}

	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}

	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), node1Name, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */)
}
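// The verifyAttachedVolume helper used by these tests is not shown in this
// excerpt. A minimal sketch of what it might look like follows, assuming
// AttachedVolume exposes VolumeName, VolumeSpec, NodeName, DevicePath,
// MountedByNode, and DetachRequestedTime fields; treat the exact field names
// as assumptions rather than the canonical implementation.
func verifyAttachedVolume(
	t *testing.T,
	attachedVolumes []AttachedVolume,
	expectedVolumeName v1.UniqueVolumeName,
	expectedVolumeSpecName string,
	expectedNodeName types.NodeName,
	expectedDevicePath string,
	expectedMountedByNode bool,
	expectNonZeroDetachRequestedTime bool) {
	for _, attachedVolume := range attachedVolumes {
		// Match on every identifying field; the two booleans assert the
		// mount state and whether a detach has been requested.
		if attachedVolume.VolumeName == expectedVolumeName &&
			attachedVolume.VolumeSpec.Name() == expectedVolumeSpecName &&
			attachedVolume.NodeName == expectedNodeName &&
			attachedVolume.DevicePath == expectedDevicePath &&
			attachedVolume.MountedByNode == expectedMountedByNode &&
			attachedVolume.DetachRequestedTime.IsZero() == !expectNonZeroDetachRequestedTime {
			return
		}
	}
	t.Fatalf(
		"attachedVolumes (%v) does not contain the expected volume %q on node %q",
		attachedVolumes, expectedVolumeName, expectedNodeName)
}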
func Test_GetAttachedVolumesForNode_Positive_TwoVolumeTwoNodes(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volume1Name := v1.UniqueVolumeName("volume1-name")
	volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
	node1Name := types.NodeName("node1-name")
	devicePath := "fake/device/path"
	_, add1Err := asw.AddVolumeNode(volume1Spec, node1Name, devicePath)
	if add1Err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
	}
	volume2Name := v1.UniqueVolumeName("volume2-name")
	volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
	node2Name := types.NodeName("node2-name")
	generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Spec, node2Name, devicePath)
	if add2Err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
	}

	// Act
	attachedVolumes := asw.GetAttachedVolumesForNode(node2Name)

	// Assert
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}

	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName2, string(volume2Name), node2Name, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */)
}
func TestInstances(t *testing.T) {
	testVM, _, cfg, ok := configFromEnv()
	if !ok {
		t.Skipf("No config found in environment")
	}
	NodeName := types.NodeName(testVM)

	pc, err := newPCCloud(cfg)
	if err != nil {
		t.Fatalf("Failed to create new Photon client: %s", err)
	}

	i, ok := pc.Instances()
	if !ok {
		t.Fatalf("Instances() returned false")
	}

	externalId, err := i.ExternalID(NodeName)
	if err != nil {
		t.Fatalf("Instances.ExternalID(%s) failed: %s", testVM, err)
	}
	t.Logf("Found ExternalID(%s) = %s\n", testVM, externalId)

	nonExistingVM := types.NodeName(rand.String(15))
	externalId, err = i.ExternalID(nonExistingVM)
	if err == cloudprovider.InstanceNotFound {
		t.Logf("VM %s was not found as expected\n", nonExistingVM)
	} else if err == nil {
		t.Fatalf("Instances.ExternalID did not fail as expected, VM %s was found", nonExistingVM)
	} else {
		t.Fatalf("Instances.ExternalID did not fail as expected, err: %v", err)
	}

	instanceId, err := i.InstanceID(NodeName)
	if err != nil {
		t.Fatalf("Instances.InstanceID(%s) failed: %s", testVM, err)
	}
	t.Logf("Found InstanceID(%s) = %s\n", testVM, instanceId)

	instanceId, err = i.InstanceID(nonExistingVM)
	if err == cloudprovider.InstanceNotFound {
		t.Logf("VM %s was not found as expected\n", nonExistingVM)
	} else if err == nil {
		t.Fatalf("Instances.InstanceID did not fail as expected, VM %s was found", nonExistingVM)
	} else {
		t.Fatalf("Instances.InstanceID did not fail as expected, err: %v", err)
	}

	addrs, err := i.NodeAddresses(NodeName)
	if err != nil {
		t.Fatalf("Instances.NodeAddresses(%s) failed: %s", testVM, err)
	}
	t.Logf("Found NodeAddresses(%s) = %s\n", testVM, addrs)
}
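// TestInstances above exercises the cloudprovider.Instances interface. The
// subset of methods it relies on looks roughly like the sketch below; the
// real interface is larger, and the exact signatures here are assumptions
// based on how the test calls them.
type instancesSubset interface {
	// ExternalID returns the cloud provider ID of the named node, or
	// cloudprovider.InstanceNotFound if the instance does not exist.
	ExternalID(nodeName types.NodeName) (string, error)
	// InstanceID returns the cloud provider ID of the named node.
	InstanceID(nodeName types.NodeName) (string, error)
	// NodeAddresses returns the addresses of the named node.
	NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error)
}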
// Populates data struct with a pod/volume/node1 entry.
// Calls DeletePod() against node2, which does not exist.
// Verifies the volume still exists on node1, and one volume to attach.
func Test_DeletePod_Positive_NodeDoesNotExist(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(volumePluginMgr)
	podName := "pod-uid"
	volumeName := v1.UniqueVolumeName("volume-name")
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
	node1Name := k8stypes.NodeName("node1-name")
	dsw.AddNode(node1Name)
	generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, node1Name)
	if podAddErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			podName,
			podAddErr)
	}
	volumeExists := dsw.VolumeExists(generatedVolumeName, node1Name)
	if !volumeExists {
		t.Fatalf(
			"Added pod %q to volume %q/node %q. Volume does not exist, it should.",
			podName,
			generatedVolumeName,
			node1Name)
	}
	node2Name := k8stypes.NodeName("node2-name")

	// Act
	dsw.DeletePod(types.UniquePodName(podName), generatedVolumeName, node2Name)

	// Assert
	volumeExists = dsw.VolumeExists(generatedVolumeName, node1Name)
	if !volumeExists {
		t.Fatalf(
			"Volume %q/node %q does not exist, it should.",
			generatedVolumeName,
			node1Name)
	}
	volumeExists = dsw.VolumeExists(generatedVolumeName, node2Name)
	if volumeExists {
		t.Fatalf(
			"node %q exists, it should not.",
			node2Name)
	}

	volumesToAttach := dsw.GetVolumesToAttach()
	if len(volumesToAttach) != 1 {
		t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach))
	}

	verifyVolumeToAttach(t, volumesToAttach, node1Name, generatedVolumeName, string(volumeName))
}
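// verifyVolumeToAttach is the desired-state counterpart of
// verifyAttachedVolume and is likewise not shown in this excerpt. A minimal
// sketch, assuming VolumeToAttach exposes VolumeName, VolumeSpec, and
// NodeName fields:
func verifyVolumeToAttach(
	t *testing.T,
	volumesToAttach []VolumeToAttach,
	expectedNodeName k8stypes.NodeName,
	expectedVolumeName v1.UniqueVolumeName,
	expectedVolumeSpecName string) {
	for _, volumeToAttach := range volumesToAttach {
		if volumeToAttach.NodeName == expectedNodeName &&
			volumeToAttach.VolumeName == expectedVolumeName &&
			volumeToAttach.VolumeSpec.Name() == expectedVolumeSpecName {
			return
		}
	}
	t.Fatalf(
		"volumesToAttach (%v) does not contain the expected volume %q for node %q",
		volumesToAttach, expectedVolumeName, expectedNodeName)
}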
// Populates data struct with two nodes with one volume/pod each and an extra
// pod for the second node/volume pair.
// Calls GetVolumesToAttach().
// Verifies two volumes to attach.
func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(volumePluginMgr)
	node1Name := k8stypes.NodeName("node1-name")
	pod1Name := "pod1-uid"
	volume1Name := v1.UniqueVolumeName("volume1-name")
	volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
	dsw.AddNode(node1Name)
	generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volume1Spec, node1Name)
	if podAddErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod1Name,
			podAddErr)
	}
	node2Name := k8stypes.NodeName("node2-name")
	pod2Name := "pod2-uid"
	volume2Name := v1.UniqueVolumeName("volume2-name")
	volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
	dsw.AddNode(node2Name)
	generatedVolume2Name, podAddErr := dsw.AddPod(types.UniquePodName(pod2Name), controllervolumetesting.NewPod(pod2Name, pod2Name), volume2Spec, node2Name)
	if podAddErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod2Name,
			podAddErr)
	}
	pod3Name := "pod3-uid"
	_, podAddErr = dsw.AddPod(types.UniquePodName(pod3Name), controllervolumetesting.NewPod(pod3Name, pod3Name), volume2Spec, node2Name)
	if podAddErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod3Name,
			podAddErr)
	}

	// Act
	volumesToAttach := dsw.GetVolumesToAttach()

	// Assert
	if len(volumesToAttach) != 2 {
		t.Fatalf("len(volumesToAttach) Expected: <2> Actual: <%v>", len(volumesToAttach))
	}

	verifyVolumeToAttach(t, volumesToAttach, node1Name, generatedVolume1Name, string(volume1Name))
	verifyVolumeToAttach(t, volumesToAttach, node2Name, generatedVolume2Name, string(volume2Name))
}
// Populates data struct with a single node.
// Calls DeleteNode() to delete the node.
// Verifies node no longer exists, and zero volumes to attach.
func Test_DeleteNode_Positive_NodeExists(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(volumePluginMgr)
	nodeName := k8stypes.NodeName("node-name")
	dsw.AddNode(nodeName)

	// Act
	err := dsw.DeleteNode(nodeName)

	// Assert
	if err != nil {
		t.Fatalf("DeleteNode failed. Expected: <no error> Actual: <%v>", err)
	}

	nodeExists := dsw.NodeExists(nodeName)
	if nodeExists {
		t.Fatalf("Deleted node %q still exists, it should not.", nodeName)
	}

	volumesToAttach := dsw.GetVolumesToAttach()
	if len(volumesToAttach) != 0 {
		t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach))
	}
}
// Populates data struct with one volume/node entry.
// Calls SetVolumeMountedByNode once, setting mounted to false.
// Verifies mountedByNode is still true, since no prior SetVolumeMountedByNode(true) call was made.
func Test_SetVolumeMountedByNode_Positive_UnsetWithoutInitialSet(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := v1.UniqueVolumeName("volume-name")
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
	nodeName := types.NodeName("node-name")
	devicePath := "fake/device/path"
	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
	if addErr != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
	}

	// Act
	setVolumeMountedErr := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */)

	// Assert
	if setVolumeMountedErr != nil {
		t.Fatalf("SetVolumeMountedByNode failed. Expected <no error> Actual: <%v>", setVolumeMountedErr)
	}

	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}

	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */)
}
// Populates data struct with one volume/node entry.
// Calls SetDetachRequestTime() followed by RemoveVolumeFromReportAsAttached() on that entry.
// Verifies mountedByNode is true and detachRequestedTime is NOT zero.
func Test_RemoveVolumeFromReportAsAttached_Positive_Marked(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := v1.UniqueVolumeName("volume-name")
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
	nodeName := types.NodeName("node-name")
	devicePath := "fake/device/path"
	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
	if addErr != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
	}

	// Act
	_, err := asw.SetDetachRequestTime(generatedVolumeName, nodeName)
	if err != nil {
		t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err)
	}
	removeVolumeDetachErr := asw.RemoveVolumeFromReportAsAttached(generatedVolumeName, nodeName)
	if removeVolumeDetachErr != nil {
		t.Fatalf("RemoveVolumeFromReportAsAttached failed. Expected: <no error> Actual: <%v>", removeVolumeDetachErr)
	}

	// Assert
	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 1 {
		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
	}

	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, true /* expectNonZeroDetachRequestedTime */)
}
// PerformTLSBootstrap creates a RESTful client to execute a certificate signing request.
func PerformTLSBootstrap(s *kubeadmapi.KubeadmConfig, apiEndpoint string, caCert []byte) (*clientcmdapi.Config, error) {
	// TODO(phase1+) try all the api servers until we find one that works
	bareClientConfig := kubeadmutil.CreateBasicClientConfig("kubernetes", apiEndpoint, caCert)

	hostName, err := os.Hostname()
	if err != nil {
		return nil, fmt.Errorf("<node/csr> failed to get node hostname [%v]", err)
	}

	// TODO: hostname == nodename doesn't hold on all clouds (AWS).
	// But we don't have a cloudprovider, so we're stuck.
	glog.Errorf("assuming that hostname is the same as NodeName")
	nodeName := types.NodeName(hostName)

	bootstrapClientConfig, err := clientcmd.NewDefaultClientConfig(
		*kubeadmutil.MakeClientConfigWithToken(
			bareClientConfig, "kubernetes", fmt.Sprintf("kubelet-%s", nodeName), s.Secrets.BearerToken,
		),
		&clientcmd.ConfigOverrides{},
	).ClientConfig()
	if err != nil {
		return nil, fmt.Errorf("<node/csr> failed to create API client configuration [%v]", err)
	}

	client, err := unversionedcertificates.NewForConfig(bootstrapClientConfig)
	if err != nil {
		return nil, fmt.Errorf("<node/csr> failed to create API client [%v]", err)
	}
	csrClient := client.CertificateSigningRequests()

	// TODO(phase1+) checkCertsAPI() has the side effect of making the first attempt to communicate with the API;
	// we should _make it more explicit_ and have a user-settable _retry timeout_ to account for potential connectivity issues
	// (for example, the user may be bringing up machines in parallel and for some reason the master is slow to boot)

	if err := checkCertsAPI(bootstrapClientConfig); err != nil {
		return nil, fmt.Errorf("<node/csr> failed to proceed due to API compatibility issue - %v", err)
	}

	fmt.Println("<node/csr> created API client to obtain unique certificate for this node, generating keys and certificate signing request")

	key, err := certutil.MakeEllipticPrivateKeyPEM()
	if err != nil {
		return nil, fmt.Errorf("<node/csr> failed to generating private key [%v]", err)
	}

	cert, err := csr.RequestNodeCertificate(csrClient, key, nodeName)
	if err != nil {
		return nil, fmt.Errorf("<node/csr> failed to request signed certificate from the API server [%v]", err)
	}

	// TODO(phase1+) print some basic info about the cert
	fmt.Println("<node/csr> received signed certificate from the API server, generating kubelet configuration")

	finalConfig := kubeadmutil.MakeClientConfigWithCerts(
		bareClientConfig, "kubernetes", fmt.Sprintf("kubelet-%s", nodeName),
		key, cert,
	)

	return finalConfig, nil
}
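// CreateBasicClientConfig is referenced above but not shown. A plausible
// sketch, assuming it only seeds a clientcmdapi.Config with the cluster
// endpoint and CA data; treat this as illustrative rather than the exact
// kubeadm implementation.
func createBasicClientConfigSketch(clusterName, serverURL string, caCert []byte) *clientcmdapi.Config {
	cluster := clientcmdapi.NewCluster()
	cluster.Server = serverURL
	cluster.CertificateAuthorityData = caCert

	config := clientcmdapi.NewConfig()
	config.Clusters[clusterName] = cluster
	return config
}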
func TestReadPodsFromFileExistAlready(t *testing.T) {
	hostname := types.NodeName("random-test-hostname")
	var testCases = getTestCases(hostname)

	for _, testCase := range testCases {
		func() {
			dirName, err := utiltesting.MkTmpdir("file-test")
			if err != nil {
				t.Fatalf("unable to create temp dir: %v", err)
			}
			defer os.RemoveAll(dirName)
			file := testCase.writeToFile(dirName, "test_pod_config", t)

			ch := make(chan interface{})
			NewSourceFile(file, hostname, time.Millisecond, ch)
			select {
			case got := <-ch:
				update := got.(kubetypes.PodUpdate)
				for _, pod := range update.Pods {
					if errs := validation.ValidatePod(pod); len(errs) > 0 {
						t.Fatalf("%s: Invalid pod %#v, %#v", testCase.desc, pod, errs)
					}
				}
				if !api.Semantic.DeepEqual(testCase.expected, update) {
					t.Fatalf("%s: Expected %#v, Got %#v", testCase.desc, testCase.expected, update)
				}
			case <-time.After(wait.ForeverTestTimeout):
				t.Fatalf("%s: Expected update, timeout instead", testCase.desc)
			}
		}()
	}
}
// PortForwardLocation returns the port-forward URL for a pod.
func PortForwardLocation(
	getter ResourceGetter,
	connInfo client.ConnectionInfoGetter,
	ctx api.Context,
	name string,
) (*url.URL, http.RoundTripper, error) {
	pod, err := getPod(getter, ctx, name)
	if err != nil {
		return nil, nil, err
	}

	nodeName := types.NodeName(pod.Spec.NodeName)
	if len(nodeName) == 0 {
		// If pod has not been assigned a host, return an empty location
		return nil, nil, errors.NewBadRequest(fmt.Sprintf("pod %s does not have a host assigned", name))
	}
	nodeInfo, err := connInfo.GetConnectionInfo(ctx, nodeName)
	if err != nil {
		return nil, nil, err
	}
	loc := &url.URL{
		Scheme: nodeInfo.Scheme,
		Host:   net.JoinHostPort(nodeInfo.Hostname, nodeInfo.Port),
		Path:   fmt.Sprintf("/portForward/%s/%s", pod.Namespace, pod.Name),
	}
	return loc, nodeInfo.Transport, nil
}
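// A hedged sketch of how a "portforward" subresource handler might consume
// PortForwardLocation. The handler shape and the injected proxy constructor
// are assumptions for illustration, not the actual registry code: the
// handler resolves the location, then hands it to whatever upgrade-aware
// proxy the caller supplies.
func connectPortForward(
	getter ResourceGetter,
	connInfo client.ConnectionInfoGetter,
	ctx api.Context,
	podName string,
	proxy func(*url.URL, http.RoundTripper) http.Handler, // assumed proxy constructor
) (http.Handler, error) {
	location, transport, err := PortForwardLocation(getter, connInfo, ctx, podName)
	if err != nil {
		return nil, err
	}
	// Stream the upgraded connection to the kubelet at the resolved location.
	return proxy(location, transport), nil
}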
// CurrentNodeName implements Instances.CurrentNodeName.
// Note the returned node name is *not* necessarily the same as the hostname.
func (i *Instances) CurrentNodeName(hostname string) (types.NodeName, error) {
	md, err := getMetadata()
	if err != nil {
		return "", err
	}
	return types.NodeName(md.Name), nil
}
// Populates data struct with one volume/node entry.
// Calls RemoveVolumeFromReportAsAttached().
// Verifies there is no volume reported as attached.
func Test_RemoveVolumeFromReportAsAttached(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := v1.UniqueVolumeName("volume-name")
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
	nodeName := types.NodeName("node-name")
	devicePath := "fake/device/path"
	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
	if addErr != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
	}

	removeVolumeDetachErr := asw.RemoveVolumeFromReportAsAttached(generatedVolumeName, nodeName)
	if removeVolumeDetachErr != nil {
		t.Fatalf("RemoveVolumeFromReportAsAttached failed. Expected: <no error> Actual: <%v>", removeVolumeDetachErr)
	}

	reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached()
	volumes, exists := reportAsAttachedVolumesMap[nodeName]
	if !exists {
		t.Fatalf("MarkDesireToDetach_UnmarkDesireToDetach failed. Expected: <node %q exist> Actual: <node does not exist in the reportedAsAttached map", nodeName)
	}
	if len(volumes) > 0 {
		t.Fatalf("len(reportAsAttachedVolumes) Expected: <0> Actual: <%v>", len(volumes))
	}
}
// Populates data struct with new pod/volume/node.
// Calls VolumeExists() on that volume/node.
// Verifies volume/node exists, and one volume to attach.
func Test_VolumeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(volumePluginMgr)
	nodeName := k8stypes.NodeName("node-name")
	dsw.AddNode(nodeName)
	podName := "pod-uid"
	volumeName := v1.UniqueVolumeName("volume-name")
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
	generatedVolumeName, _ := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)

	// Act
	volumeExists := dsw.VolumeExists(generatedVolumeName, nodeName)

	// Assert
	if !volumeExists {
		t.Fatalf("Volume %q does not exist, it should.", generatedVolumeName)
	}

	volumesToAttach := dsw.GetVolumesToAttach()
	if len(volumesToAttach) != 1 {
		t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach))
	}

	verifyVolumeToAttach(t, volumesToAttach, nodeName, generatedVolumeName, string(volumeName))
}
// Calls AddNode() once.
// Verifies node exists.
// Calls AddNode() again with the same node.
// Verifies node exists, and zero volumes to attach.
func Test_AddNode_Positive_ExistingNode(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(volumePluginMgr)
	nodeName := k8stypes.NodeName("node-name")

	// Act
	dsw.AddNode(nodeName)

	// Assert
	nodeExists := dsw.NodeExists(nodeName)
	if !nodeExists {
		t.Fatalf("Added node %q does not exist, it should.", nodeName)
	}

	// Act
	dsw.AddNode(nodeName)

	// Assert
	nodeExists = dsw.NodeExists(nodeName)
	if !nodeExists {
		t.Fatalf("Added node %q does not exist, it should.", nodeName)
	}

	volumesToAttach := dsw.GetVolumesToAttach()
	if len(volumesToAttach) != 0 {
		t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach))
	}
}
// Populates data struct with one volume/node entry.
// Calls SetDetachRequestTime twice, sleeping maxWaitTime (1 second) in between.
// The elapsed time returned from the first SetDetachRequestTime call should be smaller than maxWaitTime.
// The elapsed time returned from the second call should be larger than maxWaitTime.
func Test_SetDetachRequestTime_Positive(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := v1.UniqueVolumeName("volume-name")
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
	nodeName := types.NodeName("node-name")
	devicePath := "fake/device/path"
	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
	if addErr != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
	}

	maxWaitTime := 1 * time.Second
	etime, err := asw.SetDetachRequestTime(generatedVolumeName, nodeName)
	if err != nil {
		t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err)
	}
	if etime >= maxWaitTime {
		t.Fatalf("SetDetachRequestTime Expected: <elapsed time %v smaller than maxWaitTime %v> Actual: <elapsed time is larger>", etime, maxWaitTime)
	}
	// Sleep and call SetDetachRequestTime again
	time.Sleep(maxWaitTime)
	etime, err = asw.SetDetachRequestTime(generatedVolumeName, nodeName)
	if err != nil {
		t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err)
	}
	if etime < maxWaitTime {
		t.Fatalf("SetDetachRequestTim Expected: <elapsed time %v is larger than maxWaitTime %v> Actual <elapsed time is smaller>", etime, maxWaitTime)
	}
}
// Populates data struct with one volume/node entry.
// Calls DeleteVolumeNode() to delete volume/node.
// Verifies no volume/node entries exists.
func Test_DeleteVolumeNode_Positive_VolumeExistsNodeExists(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := v1.UniqueVolumeName("volume-name")
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
	nodeName := types.NodeName("node-name")
	devicePath := "fake/device/path"
	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
	if addErr != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
	}

	// Act
	asw.DeleteVolumeNode(generatedVolumeName, nodeName)

	// Assert
	volumeNodeComboExists := asw.VolumeNodeExists(generatedVolumeName, nodeName)
	if volumeNodeComboExists {
		t.Fatalf("%q/%q volume/node combo exists, it should not.", generatedVolumeName, nodeName)
	}

	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 0 {
		t.Fatalf("len(attachedVolumes) Expected: <0> Actual: <%v>", len(attachedVolumes))
	}
}
// ResourceLocation returns a URL and transport that one can use to send traffic to the specified node.
func ResourceLocation(getter ResourceGetter, connection client.ConnectionInfoGetter, proxyTransport http.RoundTripper, ctx api.Context, id string) (*url.URL, http.RoundTripper, error) {
	schemeReq, name, portReq, valid := utilnet.SplitSchemeNamePort(id)
	if !valid {
		return nil, nil, errors.NewBadRequest(fmt.Sprintf("invalid node request %q", id))
	}

	info, err := connection.GetConnectionInfo(ctx, types.NodeName(name))
	if err != nil {
		return nil, nil, err
	}

	// Use the kubelet's own transport if either:
	// - no port is specified in the request (the kubelet's port is the default), or
	// - the requested port matches the kubelet port for this node
	if portReq == "" || portReq == info.Port {
		return &url.URL{
				Scheme: info.Scheme,
				Host:   net.JoinHostPort(info.Hostname, info.Port),
			},
			info.Transport,
			nil
	}

	// Otherwise, return the requested scheme and port, and the proxy transport
	return &url.URL{Scheme: schemeReq, Host: net.JoinHostPort(info.Hostname, portReq)}, proxyTransport, nil
}
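// A small illustration of the ID forms SplitSchemeNamePort accepts and how
// ResourceLocation interprets them (inputs invented for the example; the
// expected parses follow the scheme:name:port contract described above):
func exampleNodeProxyIDs() {
	for _, id := range []string{"node-1", "node-1:4194", "https:node-1:10250"} {
		scheme, name, port, valid := utilnet.SplitSchemeNamePort(id)
		// "node-1"             -> scheme ""      name "node-1" port ""
		// "node-1:4194"        -> scheme ""      name "node-1" port "4194"
		// "https:node-1:10250" -> scheme "https" name "node-1" port "10250"
		fmt.Printf("%q -> scheme=%q name=%q port=%q valid=%v\n", id, scheme, name, port, valid)
	}
}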
// Calls AddVolumeNode() twice, the second time with a different node name.
// Verifies two volume/node entries exist with the same volumeSpec.
func Test_AddVolumeNode_Positive_ExistingVolumeNewNode(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	volumeName := v1.UniqueVolumeName("volume-name")
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
	node1Name := types.NodeName("node1-name")
	node2Name := types.NodeName("node2-name")
	devicePath := "fake/device/path"

	// Act
	generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, node1Name, devicePath)
	generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeSpec, node2Name, devicePath)

	// Assert
	if add1Err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
	}
	if add2Err != nil {
		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
	}

	if generatedVolumeName1 != generatedVolumeName2 {
		t.Fatalf(
			"Generated volume names for the same volume should be the same but they are not: %q and %q",
			generatedVolumeName1,
			generatedVolumeName2)
	}

	volumeNode1ComboExists := asw.VolumeNodeExists(generatedVolumeName1, node1Name)
	if !volumeNode1ComboExists {
		t.Fatalf("%q/%q volume/node combo does not exist, it should.", generatedVolumeName1, node1Name)
	}

	volumeNode2ComboExists := asw.VolumeNodeExists(generatedVolumeName1, node2Name)
	if !volumeNode2ComboExists {
		t.Fatalf("%q/%q volume/node combo does not exist, it should.", generatedVolumeName1, node2Name)
	}

	attachedVolumes := asw.GetAttachedVolumes()
	if len(attachedVolumes) != 2 {
		t.Fatalf("len(attachedVolumes) Expected: <2> Actual: <%v>", len(attachedVolumes))
	}

	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volumeName), node1Name, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */)
	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volumeName), node2Name, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */)
}
// Populates desiredStateOfWorld cache with one node/volume/pod tuple.
// Has the node status update fail.
// Calls Run().
// Verifies there is one attach call and no detach calls.
// Marks the node/volume as unmounted.
// Deletes the node/volume/pod tuple from the desiredStateOfWorld cache.
// Verifies there are no detach calls and no (new) attach calls.
func Test_Run_Negative_OneDesiredVolumeAttachThenDetachWithUnmountedVolumeUpdateStatusFail(t *testing.T) {
	// Arrange
	volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
	asw := cache.NewActualStateOfWorld(volumePluginMgr)
	fakeKubeClient := controllervolumetesting.CreateTestClient()
	fakeRecorder := &record.FakeRecorder{}
	ad := operationexecutor.NewOperationExecutor(fakeKubeClient, volumePluginMgr, fakeRecorder, false)
	nsu := statusupdater.NewFakeNodeStatusUpdater(true /* returnError */)
	reconciler := NewReconciler(
		reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu)
	podName := "pod-uid"
	volumeName := v1.UniqueVolumeName("volume-name")
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
	nodeName := k8stypes.NodeName("node-name")
	dsw.AddNode(nodeName)
	volumeExists := dsw.VolumeExists(volumeName, nodeName)
	if volumeExists {
		t.Fatalf(
			"Volume %q/node %q should not exist, but it does.",
			volumeName,
			nodeName)
	}

	generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
	if podAddErr != nil {
		t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
	}

	// Act
	go reconciler.Run(wait.NeverStop)

	// Assert
	waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
	verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
	waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
	verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
	waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)

	// Act
	dsw.DeletePod(types.UniquePodName(podName), generatedVolumeName, nodeName)
	volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName)
	if volumeExists {
		t.Fatalf(
			"Deleted pod %q from volume %q/node %q. Volume should also be deleted but it still exists.",
			podName,
			generatedVolumeName,
			nodeName)
	}
	asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */)
	asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */)

	// Assert
	verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
	verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
	waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
	verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
	waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)
}
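// NewFakeNodeStatusUpdater is not shown in this excerpt. A minimal sketch of
// a fake satisfying the statusupdater interface used above, assuming the
// interface consists of a single UpdateNodeStatuses method:
type fakeNodeStatusUpdater struct {
	returnError bool
}

func (f *fakeNodeStatusUpdater) UpdateNodeStatuses() error {
	if f.returnError {
		// Simulate the node status update failure the test above relies on.
		return fmt.Errorf("fake error on update node status")
	}
	return nil
}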
// Populates data struct with two nodes.
// Calls GetVolumesToAttach().
// Verifies zero volumes to attach.
func Test_GetVolumesToAttach_Positive_TwoNodes(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(volumePluginMgr)
	node1Name := k8stypes.NodeName("node1-name")
	node2Name := k8stypes.NodeName("node2-name")
	dsw.AddNode(node1Name)
	dsw.AddNode(node2Name)

	// Act
	volumesToAttach := dsw.GetVolumesToAttach()

	// Assert
	if len(volumesToAttach) != 0 {
		t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach))
	}
}
// Populates data struct with pod1/volume/node and pod2/volume/node entries.
// Calls DeletePod() to delete the pod1/volume/node entry.
// Verifies the volume still exists, and one volume to attach.
func Test_DeletePod_Positive_2PodsExistNodeExistsVolumesExist(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
	dsw := NewDesiredStateOfWorld(volumePluginMgr)
	pod1Name := "pod1-uid"
	pod2Name := "pod2-uid"
	volumeName := v1.UniqueVolumeName("volume-name")
	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
	nodeName := k8stypes.NodeName("node-name")
	dsw.AddNode(nodeName)
	generatedVolumeName1, pod1AddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volumeSpec, nodeName)
	if pod1AddErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod1Name,
			pod1AddErr)
	}
	generatedVolumeName2, pod2AddErr := dsw.AddPod(types.UniquePodName(pod2Name), controllervolumetesting.NewPod(pod2Name, pod2Name), volumeSpec, nodeName)
	if pod2AddErr != nil {
		t.Fatalf(
			"AddPod failed for pod %q. Expected: <no error> Actual: <%v>",
			pod2Name,
			pod2AddErr)
	}
	if generatedVolumeName1 != generatedVolumeName2 {
		t.Fatalf(
			"Generated volume names for the same volume should be the same but they are not: %q and %q",
			generatedVolumeName1,
			generatedVolumeName2)
	}
	volumeExists := dsw.VolumeExists(generatedVolumeName1, nodeName)
	if !volumeExists {
		t.Fatalf(
			"Volume %q does not exist under node %q, it should.",
			generatedVolumeName1,
			nodeName)
	}

	// Act
	dsw.DeletePod(types.UniquePodName(pod1Name), generatedVolumeName1, nodeName)

	// Assert
	volumeExists = dsw.VolumeExists(generatedVolumeName1, nodeName)
	if !volumeExists {
		t.Fatalf(
			"Volume %q under node %q should still exist, but it does not.",
			generatedVolumeName1,
			nodeName)
	}

	volumesToAttach := dsw.GetVolumesToAttach()
	if len(volumesToAttach) != 1 {
		t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach))
	}

	verifyVolumeToAttach(t, volumesToAttach, nodeName, generatedVolumeName1, string(volumeName))
}
// PerformTLSBootstrap creates a RESTful client to execute a certificate signing request.
func PerformTLSBootstrap(s *kubeadmapi.NodeConfiguration, apiEndpoint string, caCert []byte) (*clientcmdapi.Config, error) {
	// TODO(phase1+) try all the api servers until we find one that works
	bareClientConfig := kubeadmutil.CreateBasicClientConfig("kubernetes", apiEndpoint, caCert)

	hostName, err := os.Hostname()
	if err != nil {
		return nil, fmt.Errorf("<node/csr> failed to get node hostname [%v]", err)
	}

	// TODO(phase1+) https://github.com/kubernetes/kubernetes/issues/33641
	nodeName := types.NodeName(hostName)

	bootstrapClientConfig, err := clientcmd.NewDefaultClientConfig(
		*kubeadmutil.MakeClientConfigWithToken(
			bareClientConfig, "kubernetes", fmt.Sprintf("kubelet-%s", nodeName), s.Secrets.BearerToken,
		),
		&clientcmd.ConfigOverrides{},
	).ClientConfig()
	if err != nil {
		return nil, fmt.Errorf("<node/csr> failed to create API client configuration [%v]", err)
	}

	client, err := unversionedcertificates.NewForConfig(bootstrapClientConfig)
	if err != nil {
		return nil, fmt.Errorf("<node/csr> failed to create API client [%v]", err)
	}
	csrClient := client.CertificateSigningRequests()

	// TODO(phase1+) https://github.com/kubernetes/kubernetes/issues/33643

	if err := checkCertsAPI(bootstrapClientConfig); err != nil {
		return nil, fmt.Errorf("<node/csr> failed to proceed due to API compatibility issue - %v", err)
	}

	fmt.Println("<node/csr> created API client to obtain unique certificate for this node, generating keys and certificate signing request")

	key, err := certutil.MakeEllipticPrivateKeyPEM()
	if err != nil {
		return nil, fmt.Errorf("<node/csr> failed to generating private key [%v]", err)
	}
	cert, err := csr.RequestNodeCertificate(csrClient, key, nodeName)
	if err != nil {
		return nil, fmt.Errorf("<node/csr> failed to request signed certificate from the API server [%v]", err)
	}
	fmtCert, err := certutil.FormatBytesCert(cert)
	if err != nil {
		return nil, fmt.Errorf("<node/csr> failed to format certificate [%v]", err)
	}
	fmt.Printf("<node/csr> received signed certificate from the API server:\n%s\n", fmtCert)
	fmt.Println("<node/csr> generating kubelet configuration")
	finalConfig := kubeadmutil.MakeClientConfigWithCerts(
		bareClientConfig, "kubernetes", fmt.Sprintf("kubelet-%s", nodeName),
		key, cert,
	)

	return finalConfig, nil
}
func streamLocation(
	getter ResourceGetter,
	connInfo client.ConnectionInfoGetter,
	ctx api.Context,
	name string,
	opts runtime.Object,
	container,
	path string,
) (*url.URL, http.RoundTripper, error) {
	pod, err := getPod(getter, ctx, name)
	if err != nil {
		return nil, nil, err
	}

	// Try to figure out a container
	// If a container was provided, it must be valid
	if container == "" {
		switch len(pod.Spec.Containers) {
		case 1:
			container = pod.Spec.Containers[0].Name
		case 0:
			return nil, nil, errors.NewBadRequest(fmt.Sprintf("a container name must be specified for pod %s", name))
		default:
			containerNames := getContainerNames(pod.Spec.Containers)
			initContainerNames := getContainerNames(pod.Spec.InitContainers)
			err := fmt.Sprintf("a container name must be specified for pod %s, choose one of: [%s]", name, containerNames)
			if len(initContainerNames) > 0 {
				err += fmt.Sprintf(" or one of the init containers: [%s]", initContainerNames)
			}
			return nil, nil, errors.NewBadRequest(err)
		}
	} else {
		if !podHasContainerWithName(pod, container) {
			return nil, nil, errors.NewBadRequest(fmt.Sprintf("container %s is not valid for pod %s", container, name))
		}
	}
	nodeName := types.NodeName(pod.Spec.NodeName)
	if len(nodeName) == 0 {
		// If pod has not been assigned a host, return an empty location
		return nil, nil, errors.NewBadRequest(fmt.Sprintf("pod %s does not have a host assigned", name))
	}
	nodeInfo, err := connInfo.GetConnectionInfo(ctx, nodeName)
	if err != nil {
		return nil, nil, err
	}
	params := url.Values{}
	if err := streamParams(params, opts); err != nil {
		return nil, nil, err
	}
	loc := &url.URL{
		Scheme:   nodeInfo.Scheme,
		Host:     net.JoinHostPort(nodeInfo.Hostname, nodeInfo.Port),
		Path:     fmt.Sprintf("/%s/%s/%s/%s", path, pod.Namespace, pod.Name, container),
		RawQuery: params.Encode(),
	}
	return loc, nodeInfo.Transport, nil
}
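// streamLocation is a shared helper; the exported exec/attach/log-style
// entry points are thin wrappers that pass the subresource path. A sketch of
// one such wrapper, assuming an options type with a Container field:
func ExecLocation(
	getter ResourceGetter,
	connInfo client.ConnectionInfoGetter,
	ctx api.Context,
	name string,
	opts *api.PodExecOptions,
) (*url.URL, http.RoundTripper, error) {
	return streamLocation(getter, connInfo, ctx, name, opts, opts.Container, "exec")
}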
func watchFileAdded(watchDir bool, t *testing.T) {
	hostname := types.NodeName("random-test-hostname")
	var testCases = getTestCases(hostname)

	fileNamePre := "test_pod_config"
	for index, testCase := range testCases {
		func() {
			dirName, err := utiltesting.MkTmpdir("dir-test")
			if err != nil {
				t.Fatalf("unable to create temp dir: %v", err)
			}
			defer os.RemoveAll(dirName)
			fileName := fmt.Sprintf("%s_%d", fileNamePre, index)

			ch := make(chan interface{})
			if watchDir {
				NewSourceFile(dirName, hostname, 100*time.Millisecond, ch)
			} else {
				NewSourceFile(filepath.Join(dirName, fileName), hostname, 100*time.Millisecond, ch)
			}
			expectEmptyUpdate(t, ch)

			addFile := func() {
				// Add a file
				testCase.writeToFile(dirName, fileName, t)
			}

			if watchDir {
				defer func() {
					// Remove the file
					deleteFile(dirName, fileName, ch, t)
				}()
			}

			go addFile()

			if watchDir {
				// expect an update by CREATE inotify event
				expectUpdate(t, ch, testCase)
				// expect an update by MODIFY inotify event
				expectUpdate(t, ch, testCase)

				from := fileName
				fileName = fileName + "_ch"
				go changeFileName(dirName, from, fileName, t)
				// expect an update by MOVED_FROM inotify event caused by the file rename
				expectEmptyUpdate(t, ch)
				// expect an update by MOVED_TO inotify event caused by the file rename
				expectUpdate(t, ch, testCase)
			} else {
				// expect an update by SourceFile.resetStoreFromPath()
				expectUpdate(t, ch, testCase)
			}
		}()
	}
}
// EstablishMasterConnection establishes a connection with exactly one of the provided API endpoints.
// The function builds a client for every endpoint and concurrently keeps trying to connect to any one
// of the provided endpoints. Blocks until at least one connection is established, then it stops the
// connection attempts for other endpoints.
func EstablishMasterConnection(s *kubeadmapi.NodeConfiguration, clusterInfo *kubeadmapi.ClusterInfo) (*ConnectionDetails, error) {
	hostName, err := os.Hostname()
	if err != nil {
		return nil, fmt.Errorf("<node/bootstrap> failed to get node hostname [%v]", err)
	}
	// TODO(phase1+) https://github.com/kubernetes/kubernetes/issues/33641
	nodeName := types.NodeName(hostName)

	endpoints := clusterInfo.Endpoints
	caCert := []byte(clusterInfo.CertificateAuthorities[0])

	stopChan := make(chan struct{})
	result := make(chan *ConnectionDetails)
	var wg sync.WaitGroup
	for _, endpoint := range endpoints {
		clientSet, err := createClients(caCert, endpoint, s.Secrets.BearerToken, nodeName)
		if err != nil {
			fmt.Printf("<node/bootstrap> warning: %s. Skipping endpoint %s\n", err, endpoint)
			continue
		}
		wg.Add(1)
		go func(apiEndpoint string) {
			defer wg.Done()
			wait.Until(func() {
				fmt.Printf("<node/bootstrap> trying to connect to endpoint %s\n", apiEndpoint)
				err := checkAPIEndpoint(clientSet, apiEndpoint)
				if err != nil {
					fmt.Printf("<node/bootstrap> endpoint check failed [%v]\n", err)
					return
				}
				fmt.Printf("<node/bootstrap> successfully established connection with endpoint %s\n", apiEndpoint)
				// connection established, stop all wait threads
				close(stopChan)
				result <- &ConnectionDetails{
					CertClient: clientSet.CertificatesClient,
					Endpoint:   apiEndpoint,
					CACert:     caCert,
					NodeName:   nodeName,
				}
			}, retryTimeout*time.Second, stopChan)
		}(endpoint)
	}

	go func() {
		wg.Wait()
		// all wait.Until() calls have finished now
		close(result)
	}()

	establishedConnection, ok := <-result
	if !ok {
		return nil, fmt.Errorf("<node/bootstrap> failed to create bootstrap clients " +
			"for any of the provided API endpoints")
	}
	return establishedConnection, nil
}
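// ConnectionDetails is returned above but not defined in this excerpt. Its
// shape can be inferred from the struct literal in the goroutine; a sketch
// consistent with that usage (the CertClient field type is an assumption):
type ConnectionDetails struct {
	CertClient *unversionedcertificates.CertificatesClient
	Endpoint   string
	CACert     []byte
	NodeName   types.NodeName
}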
// List enumerates the set of node instances known by the cloud provider.
func (v *OVirtCloud) List(filter string) ([]types.NodeName, error) {
	instances, err := v.fetchAllInstances()
	if err != nil {
		return nil, err
	}
	var nodeNames []types.NodeName
	for _, s := range instances.ListSortedNames() {
		nodeNames = append(nodeNames, types.NodeName(s))
	}
	return nodeNames, nil
}
// tryUpdateNodeStatus tries to update node status to master. If ReconcileCBR0
// is set, this function will also confirm that cbr0 is configured correctly.
func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
	// In large clusters, GET and PUT operations on Node objects coming
	// from here are the majority of load on apiserver and etcd.
	// To reduce the load on etcd, we are serving GET operations from
	// apiserver cache (the data might be slightly delayed but it doesn't
	// seem to cause more conflicts - the delays are pretty small).
	// If it results in a conflict, all retries are served directly from etcd.
	// TODO: Currently apiserver doesn't support serving GET operations
	// from its cache. Thus we are hacking it by issuing LIST with
	// field selector for the name of the node (field selectors with
	// specified name are handled efficiently by apiserver). Once
	// apiserver supports GET from cache, change it here.
	opts := v1.ListOptions{
		FieldSelector: fields.Set{"metadata.name": string(kl.nodeName)}.AsSelector().String(),
	}
	if tryNumber == 0 {
		opts.ResourceVersion = "0"
	}
	nodes, err := kl.kubeClient.Core().Nodes().List(opts)
	if err != nil {
		return fmt.Errorf("error getting node %q: %v", kl.nodeName, err)
	}
	if len(nodes.Items) != 1 {
		return fmt.Errorf("no node instance returned for %q", kl.nodeName)
	}
	node := &nodes.Items[0]

	clonedNode, err := conversion.NewCloner().DeepCopy(node)
	if err != nil {
		return fmt.Errorf("error clone node %q: %v", kl.nodeName, err)
	}

	originalNode, ok := clonedNode.(*v1.Node)
	if !ok || originalNode == nil {
		return fmt.Errorf("failed to cast %q node object %#v to v1.Node", kl.nodeName, clonedNode)
	}

	kl.updatePodCIDR(node.Spec.PodCIDR)

	if err := kl.setNodeStatus(node); err != nil {
		return err
	}
	// Patch the current status on the API server
	updatedNode, err := nodeutil.PatchNodeStatus(kl.kubeClient, types.NodeName(kl.nodeName), originalNode, node)
	if err != nil {
		return err
	}
	// If update finishes successfully, mark the volumeInUse as reportedInUse to indicate
	// those volumes are already updated in the node's status
	kl.volumeManager.MarkVolumesAsReportedInUse(updatedNode.Status.VolumesInUse)
	return nil
}
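// tryUpdateNodeStatus is normally driven by a retry wrapper. A sketch of
// what that surrounding loop might look like, assuming a
// nodeStatusUpdateRetry constant (the constant name is illustrative):
func (kl *Kubelet) updateNodeStatusSketch() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := kl.tryUpdateNodeStatus(i); err != nil {
			glog.Errorf("Error updating node status, will retry: %v", err)
			continue
		}
		return nil
	}
	return fmt.Errorf("update node status exceeds retry count")
}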
func (adc *attachDetachController) nodeDelete(obj interface{}) {
	node, ok := obj.(*v1.Node)
	if node == nil || !ok {
		return
	}

	nodeName := types.NodeName(node.Name)
	if err := adc.desiredStateOfWorld.DeleteNode(nodeName); err != nil {
		glog.V(10).Infof("%v", err)
	}

	adc.processVolumesInUse(nodeName, node.Status.VolumesInUse)
}
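// nodeDelete is intended to run as the DeleteFunc of a node informer. A
// hedged sketch of the wiring, assuming a shared-informer registration point
// and sibling nodeAdd/nodeUpdate handlers on the controller:
func (adc *attachDetachController) registerNodeHandlersSketch(nodeInformer kcache.SharedInformer) {
	nodeInformer.AddEventHandler(kcache.ResourceEventHandlerFuncs{
		AddFunc:    adc.nodeAdd,
		UpdateFunc: adc.nodeUpdate,
		DeleteFunc: adc.nodeDelete,
	})
}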
func Test_GetAttachedVolumesForNode_Positive_NoVolumesOrNodes(t *testing.T) {
	// Arrange
	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
	asw := NewActualStateOfWorld(volumePluginMgr)
	node := types.NodeName("random")

	// Act
	attachedVolumes := asw.GetAttachedVolumesForNode(node)

	// Assert
	if len(attachedVolumes) != 0 {
		t.Fatalf("len(attachedVolumes) Expected: <0> Actual: <%v>", len(attachedVolumes))
	}
}