func (config *NetworkingTestConfig) DeleteNetProxyPod() {
	pod := config.EndpointPods[0]
	config.getPodClient().Delete(pod.Name, api.NewDeleteOptions(0))
	config.EndpointPods = config.EndpointPods[1:]
	// wait for the pod to be deleted.
	err := framework.WaitForPodToDisappear(config.f.Client, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
	if err != nil {
		framework.Failf("Failed to delete %s pod: %v", pod.Name, err)
	}
	// wait for the endpoint to be removed.
	err = framework.WaitForServiceEndpointsNum(config.f.Client, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout)
	if err != nil {
		framework.Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
	}
	// wait for kube-proxy to catch up with the pod being deleted.
	time.Sleep(5 * time.Second)
}
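The delete-then-wait pattern above can be reduced to a minimal standalone sketch. The helper below is hypothetical (deletePodAndWait is not part of the framework); it assumes the era of the e2e framework where f.Client is the unversioned client exposing Pods(ns).Delete, as in Example #4 further down, and it reuses the argument order shown above for framework.WaitForPodToDisappear (client, namespace, pod name, label selector, poll interval, timeout).

func deletePodAndWait(f *framework.Framework, podName string) {
	// Delete with a zero grace period, mirroring DeleteNetProxyPod above.
	f.Client.Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0))
	// Block until the API server no longer returns the pod.
	err := framework.WaitForPodToDisappear(f.Client, f.Namespace.Name, podName, labels.Everything(), time.Second, wait.ForeverTestTimeout)
	if err != nil {
		framework.Failf("Failed to delete pod %s: %v", podName, err)
	}
}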
// deletePodsSync deletes a list of pods and blocks until they disappear.
func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
	var wg sync.WaitGroup
	for _, pod := range pods {
		wg.Add(1)
		go func(pod *v1.Pod) {
			defer wg.Done()

			err := f.PodClient().Delete(pod.ObjectMeta.Name, v1.NewDeleteOptions(30))
			Expect(err).NotTo(HaveOccurred())

			Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
				30*time.Second, 10*time.Minute)).NotTo(HaveOccurred())
		}(pod)
	}
	wg.Wait()
}
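A short usage sketch for deletePodsSync, assuming the test kept the *v1.Pod values it created earlier (serverPod and clientPod are placeholder names, not pods defined in this file):

// serverPod and clientPod are placeholders for pods the test created earlier.
createdPods := []*v1.Pod{serverPod, clientPod}
// Delete them concurrently; deletePodsSync returns only once every pod has disappeared.
deletePodsSync(f, createdPods)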
Example #3
			By("choose a node with at least one pod - we will block some network traffic on this node")
			options := api.ListOptions{LabelSelector: label}
			pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled
			Expect(err).NotTo(HaveOccurred())
			nodeName := pods.Items[0].Spec.NodeName

			node, err := c.Core().Nodes().Get(nodeName)
			Expect(err).NotTo(HaveOccurred())

			// This creates a temporary network partition and verifies that the job still has 'parallelism'
			// running pods after the node controller marks the node as unreachable.
			By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
			testUnderTemporaryNetworkFailure(c, ns, node, func() {
				framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
				err := framework.WaitForPodToDisappear(c, ns, pods.Items[0].Name, label, 20*time.Second, 10*time.Minute)
				Expect(err).To(Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")

				By(fmt.Sprintf("verifying that there are now %v running pods", parallelism))
				_, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label)
				Expect(err).NotTo(HaveOccurred())
			})

			framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
			if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
				framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
			}
		})
	})
})
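Note that Example #3 uses framework.WaitForPodToDisappear with the assertion inverted: while the network partition is in place the pod must not disappear, so the expected result is wait.ErrWaitTimeout rather than nil. A minimal sketch of that inverted check, assuming c, ns, podName and label are already in scope as in the example (the interval and timeout values are illustrative):

// The pod is expected to survive, so a timeout from the wait is the success case here.
err := framework.WaitForPodToDisappear(c, ns, podName, label, 20*time.Second, 1*time.Minute)
Expect(err).To(Equal(wait.ErrWaitTimeout), "pod should not have been deleted")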
Example #4
			Expect(framework.IssueSSHCommand(injectCommand(permMessage, 1), framework.TestContext.Provider, node)).To(Succeed())
			By("Make sure the corresponding node condition is generated")
			Eventually(func() error {
				return verifyCondition(c.Nodes(), node.Name, condition, api.ConditionTrue, permReason, permMessage)
			}, pollTimeout, pollInterval).Should(Succeed())
			By("Make sure no new events are generated")
			Consistently(func() error {
				return verifyEvents(c.Events(eventNamespace), eventListOptions, num, tempReason, tempMessage)
			}, pollConsistent, pollInterval).Should(Succeed())
		})

		AfterEach(func() {
			By("Delete the node problem detector")
			c.Pods(ns).Delete(name, api.NewDeleteOptions(0))
			By("Wait for the node problem detector to disappear")
			Expect(framework.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(Succeed())
			By("Delete the config map")
			c.ConfigMaps(ns).Delete(configName)
			By("Clean up the events")
			Expect(c.Events(eventNamespace).DeleteCollection(api.NewDeleteOptions(0), eventListOptions)).To(Succeed())
			By("Clean up the node condition")
			patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition))
			c.Patch(api.StrategicMergePatchType).Resource("nodes").Name(node.Name).SubResource("status").Body(patch).Do()
			By("Clean up the temporary directory")
			framework.IssueSSHCommand(fmt.Sprintf("rm -r %s", tmpDir), framework.TestContext.Provider, node)
		})
	})
})

// verifyEvents verifies that num events with the given reason and message were generated
func verifyEvents(e client.EventInterface, options api.ListOptions, num int, reason, message string) error {