Code Example #1
File: deployment.go Project: leecalcote/kubernetes
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
func testDeploymentCleanUpPolicy(f *Framework) {
	ns := f.Namespace.Name
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
	rsPodLabels := map[string]string{
		"name": "cleanup-pod",
		"pod":  "nginx",
	}
	rsName := "nginx-controller"
	replicas := 1
	revisionHistoryLimit := util.IntPtr(0)
	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, "nginx", "nginx"))
	Expect(err).NotTo(HaveOccurred())

	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "cleanup-pod", false, 1)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}

	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "redis-deployment"
	Logf("Creating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, revisionHistoryLimit))
	Expect(err).NotTo(HaveOccurred())
	defer stopDeployment(c, f.Client, ns, deploymentName)

	err = waitForDeploymentOldRSsNum(c, ns, deploymentName, *revisionHistoryLimit)
	Expect(err).NotTo(HaveOccurred())
}
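The examples on this page all hand util.IntPtr's result to a *int field, where nil means "unset" and a pointer-to-zero means "explicitly zero". For reference, a minimal sketch of such a helper (the Kubernetes util package of this era implements it essentially this way, though treat the body below as an illustration rather than the canonical source):

// IntPtr returns a pointer to a copy of the given int. Go forbids taking the
// address of a literal directly, so copying the parameter makes an
// "address of 0"-style value possible.
func IntPtr(i int) *int {
	o := i
	return &o
}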
Code Example #2
File: hollow_proxy.go Project: CNDonny/scope
func NewHollowProxyOrDie(
	nodeName string,
	client *client.Client,
	endpointsConfig *proxyconfig.EndpointsConfig,
	serviceConfig *proxyconfig.ServiceConfig,
	iptInterface utiliptables.Interface,
	broadcaster record.EventBroadcaster,
	recorder record.EventRecorder,
) *HollowProxy {
	// Create and start Hollow Proxy
	config := options.NewProxyConfig()
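	// OOMScoreAdj is a pointer field; util.IntPtr(0) records an explicit
	// adjustment of 0 rather than leaving the field nil (unset).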
	config.OOMScoreAdj = util.IntPtr(0)
	config.ResourceContainer = ""
	config.NodeRef = &api.ObjectReference{
		Kind:      "Node",
		Name:      nodeName,
		UID:       types.UID(nodeName),
		Namespace: "",
	}
	proxyconfig.NewSourceAPI(
		client,
		30*time.Second,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"),
	)

	hollowProxy, err := proxyapp.NewProxyServer(client, config, iptInterface, &FakeProxier{}, broadcaster, recorder, nil, "fake")
	if err != nil {
		glog.Fatalf("Error while creating ProxyServer: %v\n", err)
	}
	return &HollowProxy{
		ProxyServer: hollowProxy,
	}
}
Code Example #3
File: stop.go Project: CNDonny/scope
func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
	deployments := reaper.Extensions().Deployments(namespace)
	replicaSets := reaper.Extensions().ReplicaSets(namespace)
	rsReaper, _ := ReaperFor(extensions.Kind("ReplicaSet"), reaper)

	deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d *extensions.Deployment) {
		// set deployment's history and scale to 0
		// TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527
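		// RevisionHistoryLimit is a *int; a pointer-to-zero tells the
		// deployment controller to prune every old replica set.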
		d.Spec.RevisionHistoryLimit = util.IntPtr(0)
		d.Spec.Replicas = 0
		d.Spec.Paused = true
	})
	if err != nil {
		return err
	}

	// Use observedGeneration to determine if the deployment controller noticed the pause.
	if err := deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) {
		return deployments.Get(name)
	}, deployment.Generation, 10*time.Millisecond, 1*time.Minute); err != nil {
		return err
	}

	// Stop all replica sets.
	selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		return err
	}

	options := api.ListOptions{LabelSelector: selector}
	rsList, err := replicaSets.List(options)
	if err != nil {
		return err
	}
	errList := []error{}
	for _, rc := range rsList.Items {
		if err := rsReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil {
			if !errors.IsNotFound(err) {
				errList = append(errList, err)
			}
		}
	}
	if len(errList) > 0 {
		return utilerrors.NewAggregate(errList)
	}

	// Delete deployment at the end.
	// Note: We delete the deployment at the end so that if removing RSs fails, we at least have the deployment to retry.
	return deployments.Delete(name, nil)
}
Code Example #4
File: options.go Project: XiaoningDing/UbernetesPOC
func NewProxyConfig() *ProxyServerConfig {
	return &ProxyServerConfig{
		KubeProxyConfiguration: componentconfig.KubeProxyConfiguration{
			BindAddress:                    "0.0.0.0",
			HealthzPort:                    10249,
			HealthzBindAddress:             "127.0.0.1",
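			// Pointer default: code that merges user-supplied flags can tell
			// this explicit default apart from a value that was never set.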
			OOMScoreAdj:                    util.IntPtr(qos.KubeProxyOOMScoreAdj),
			ResourceContainer:              "/kube-proxy",
			IPTablesSyncPeriod:             unversioned.Duration{Duration: 30 * time.Second},
			UDPIdleTimeout:                 unversioned.Duration{Duration: 250 * time.Millisecond},
			ConntrackMax:                   256 * 1024,                                     // 4x default (64k)
			ConntrackTCPEstablishedTimeout: unversioned.Duration{Duration: 24 * time.Hour}, // 1 day (1/5 default)
		},
		KubeAPIQPS:       5.0,
		KubeAPIBurst:     10,
		ConfigSyncPeriod: 15 * time.Minute,
	}
}
Code Example #5
File: deployment.go Project: sittercity/kubernetes
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
func testDeploymentCleanUpPolicy(f *Framework) {
	ns := f.Namespace.Name
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
	rsPodLabels := map[string]string{
		"name": "cleanup-pod",
		"pod":  nginxImageName,
	}
	rsName := "test-cleanup-controller"
	replicas := 1
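	// A revision history limit of zero asks the deployment controller to clean
	// up all old replica sets once the rollout completes.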
	revisionHistoryLimit := util.IntPtr(0)
	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage))
	Expect(err).NotTo(HaveOccurred())

	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "cleanup-pod", false, 1)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}

	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "test-cleanup-deployment"
	Logf("Creating deployment %s", deploymentName)

	pods, err := c.Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
	Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err)
	options := api.ListOptions{
		ResourceVersion: pods.ListMeta.ResourceVersion,
	}
	stopCh := make(chan struct{})
	w, err := c.Pods(ns).Watch(options)
	Expect(err).NotTo(HaveOccurred())
	go func() {
		// There should be only one pod being created, which is the pod with the redis image.
		// The old RS shouldn't create a new pod while the deployment controller is adding the pod-template-hash label to its selector.
		numPodCreation := 1
		for {
			select {
			case event := <-w.ResultChan():
				if event.Type != watch.Added {
					continue
				}
				numPodCreation--
				if numPodCreation < 0 {
					Failf("Expect only one pod creation, the second creation event: %#v\n", event)
				}
				pod, ok := event.Object.(*api.Pod)
				if !ok {
					Fail("Expect event Object to be a pod")
				}
				if pod.Spec.Containers[0].Name != redisImageName {
					Failf("Expect the created pod to have container name %s, got pod %#v\n", redisImageName, pod)
				}
			case <-stopCh:
				return
			}
		}
	}()
	_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RollingUpdateDeploymentStrategyType, revisionHistoryLimit))
	Expect(err).NotTo(HaveOccurred())
	defer stopDeployment(c, f.Client, ns, deploymentName)

	err = waitForDeploymentOldRSsNum(c, ns, deploymentName, *revisionHistoryLimit)
	Expect(err).NotTo(HaveOccurred())
	close(stopCh)
}