Code Example #1
File: petset.go  Project: neujie/kubernetes
func (p *petSetTester) waitForRunning(numPets int32, ps *apps.PetSet) {
	pollErr := wait.PollImmediate(petsetPoll, petsetTimeout,
		func() (bool, error) {
			podList := p.getPodList(ps)
			if int32(len(podList.Items)) < numPets {
				framework.Logf("Found %d pets, waiting for %d", len(podList.Items), numPets)
				return false, nil
			}
			if int32(len(podList.Items)) > numPets {
				return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items))
			}
			for _, p := range podList.Items {
				isReady := api.IsPodReady(&p)
				if p.Status.Phase != api.PodRunning || !isReady {
					framework.Logf("Waiting for pod %v to enter %v - Ready=True, currently %v - Ready=%v", p.Name, api.PodRunning, p.Status.Phase, isReady)
					return false, nil
				}
			}
			return true, nil
		})
	if pollErr != nil {
		framework.Failf("Failed waiting for pods to enter running: %v", pollErr)
	}

	p.waitForStatus(ps, numPets)
}
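Every example on this page is built around the same helper: wait.PollImmediate(interval, timeout, condition) runs the condition function once immediately and then once per interval until it returns true, returns a non-nil error, or the timeout expires, in which case wait.ErrWaitTimeout is returned (wait.Poll has the same signature but sleeps one interval before the first check). Below is a minimal, self-contained sketch of that contract; the import path shown is the one used in current Kubernetes trees, while the older snippets on this page vendor the package from inside the main repository.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ready := time.Now().Add(2 * time.Second)
	// The condition is evaluated immediately, then every 500ms, until it
	// returns true, returns an error, or the 5s timeout elapses.
	err := wait.PollImmediate(500*time.Millisecond, 5*time.Second, func() (bool, error) {
		if time.Now().After(ready) {
			return true, nil // done; stop polling
		}
		return false, nil // not ready yet; poll again
	})
	if err == wait.ErrWaitTimeout {
		fmt.Println("condition was never satisfied within the timeout")
		return
	}
	fmt.Println("condition satisfied:", err == nil)
}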
Code Example #2
File: server.go  Project: RomainVabre/origin
// WaitForPodCreationServiceAccounts ensures that the service account needed for pod creation exists
// and that the cache for the admission controller that checks for pod tokens has caught up, so that
// pod creation is allowed.
func WaitForPodCreationServiceAccounts(client *kclient.Client, namespace string) error {
	if err := WaitForServiceAccounts(client, namespace, []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
		return err
	}

	testPod := &kapi.Pod{}
	testPod.GenerateName = "test"
	testPod.Spec.Containers = []kapi.Container{
		{
			Name:  "container",
			Image: "openshift/origin-pod:latest",
		},
	}

	return wait.PollImmediate(time.Second, PodCreationWaitTimeout, func() (bool, error) {
		pod, err := client.Pods(namespace).Create(testPod)
		if err != nil {
			glog.Warningf("Error attempting to create test pod: %v", err)
			return false, nil
		}
		err = client.Pods(namespace).Delete(pod.Name, kapi.NewDeleteOptions(0))
		if err != nil {
			return false, err
		}
		return true, nil
	})
}
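A hypothetical call site for the helper above, assuming kubeClient is an already-configured *kclient.Client and the namespace name is illustrative:

	// Hypothetical usage during test setup: make sure the default service account
	// exists and the token-checking admission cache is warm before creating pods.
	if err := WaitForPodCreationServiceAccounts(kubeClient, "my-test-namespace"); err != nil {
		return err
	}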
Code Example #3
File: petset.go  Project: abutcher/kubernetes
func (p *statefulSetTester) waitForRunning(numPets int32, ps *apps.StatefulSet, shouldBeReady bool) {
	pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout,
		func() (bool, error) {
			podList := p.getPodList(ps)
			if int32(len(podList.Items)) < numPets {
				framework.Logf("Found %d stateful pods, waiting for %d", len(podList.Items), numPets)
				return false, nil
			}
			if int32(len(podList.Items)) > numPets {
				return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items))
			}
			for _, p := range podList.Items {
				isReady := v1.IsPodReady(&p)
				desiredReadiness := shouldBeReady == isReady
				framework.Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady)
				if p.Status.Phase != v1.PodRunning || !desiredReadiness {
					return false, nil
				}
			}
			return true, nil
		})
	if pollErr != nil {
		framework.Failf("Failed waiting for pods to enter running: %v", pollErr)
	}
}
Code Example #4
File: scale.go  Project: AdoHe/kubernetes
// Scale updates a ReplicationController to a new size, with an optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for its replica count to reach the new value
// (if waitForReplicas is not nil).
func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
	if preconditions == nil {
		preconditions = &ScalePrecondition{-1, ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	cond := ScaleCondition(scaler, preconditions, namespace, name, newSize)
	if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
		return err
	}
	if waitForReplicas != nil {
		watchOptions := api.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", name), ResourceVersion: "0"}
		watcher, err := scaler.c.ReplicationControllers(namespace).Watch(watchOptions)
		if err != nil {
			return err
		}
		_, err = watch.Until(waitForReplicas.Timeout, watcher, func(event watch.Event) (bool, error) {
			if event.Type != watch.Added && event.Type != watch.Modified {
				return false, nil
			}

			rc := event.Object.(*api.ReplicationController)
			return rc.Status.ObservedGeneration >= rc.Generation && rc.Status.Replicas == rc.Spec.Replicas, nil
		})
		if err == wait.ErrWaitTimeout {
			return fmt.Errorf("timed out waiting for %q to be synced", name)
		}
		return err
	}
	return nil
}
Code Example #5
// verifies that the cacheWatcher.process goroutine is properly cleaned up even if
// writes to the cacheWatcher.result channel are blocked.
func TestCacheWatcherCleanupNotBlockedByResult(t *testing.T) {
	var lock sync.RWMutex
	count := 0
	filter := func(string, labels.Set, fields.Set) bool { return true }
	forget := func(bool) {
		lock.Lock()
		defer lock.Unlock()
		count++
	}
	initEvents := []watchCacheEvent{
		{Object: &api.Pod{}},
		{Object: &api.Pod{}},
	}
	// set the size of the buffer of w.result to 0, so that writes to
	// w.result are blocked.
	w := newCacheWatcher(0, 0, initEvents, filter, forget)
	w.Stop()
	if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
		lock.RLock()
		defer lock.RUnlock()
		return count == 2, nil
	}); err != nil {
		t.Fatalf("expected forget() to be called twice, because sendWatchCacheEvent should not be blocked by the result channel: %v", err)
	}
}
Code Example #6
File: federation-util.go  Project: nak3/kubernetes
func cleanupServiceShardLoadBalancer(clusterName string, service *v1.Service, timeout time.Duration) error {
	provider := framework.TestContext.CloudConfig.Provider
	if provider == nil {
		return fmt.Errorf("cloud provider undefined")
	}

	internalSvc := &v1.Service{}
	err := api.Scheme.Convert(service, internalSvc, nil)
	if err != nil {
		return fmt.Errorf("failed to convert versioned service object to internal type: %v", err)
	}

	err = wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
		lbi, supported := provider.LoadBalancer()
		if !supported {
			return false, fmt.Errorf("%q doesn't support load balancers", provider.ProviderName())
		}
		err := lbi.EnsureLoadBalancerDeleted(clusterName, internalSvc)
		if err != nil {
			// Deletion failed with an error, try again.
			framework.Logf("Failed to delete cloud provider resources for service %q in namespace %q, in cluster %q", service.Name, service.Namespace, clusterName)
			return false, nil
		}
		By(fmt.Sprintf("Cloud provider resources for Service %q in namespace %q in cluster %q deleted", service.Name, service.Namespace, clusterName))
		return true, nil
	})
	return err
}
Code Example #7
File: disruption.go  Project: paralin/kubernetes
func waitForPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
	By("Waiting for all pods to be running")
	err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
		pods, err := cs.Core().Pods(ns).List(v1.ListOptions{LabelSelector: "foo=bar"})
		if err != nil {
			return false, err
		}
		if pods == nil {
			return false, fmt.Errorf("pods is nil")
		}
		if len(pods.Items) < n {
			framework.Logf("pods: %v < %v", len(pods.Items), n)
			return false, nil
		}
		ready := 0
		for i := 0; i < n; i++ {
			if pods.Items[i].Status.Phase == v1.PodRunning {
				ready++
			}
		}
		if ready < n {
			framework.Logf("running pods: %v < %v", ready, n)
			return false, nil
		}
		return true, nil
	})
	framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns)
}
Code Example #8
File: petset.go  Project: abutcher/kubernetes
func (p *statefulSetTester) scale(ps *apps.StatefulSet, count int32) error {
	name := ps.Name
	ns := ps.Namespace
	p.update(ns, name, func(ps *apps.StatefulSet) { *(ps.Spec.Replicas) = count })

	var petList *v1.PodList
	pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
		petList = p.getPodList(ps)
		if int32(len(petList.Items)) == count {
			return true, nil
		}
		return false, nil
	})
	if pollErr != nil {
		unhealthy := []string{}
		for _, pet := range petList.Items {
			delTs, phase, readiness := pet.DeletionTimestamp, pet.Status.Phase, v1.IsPodReady(&pet)
			if delTs != nil || phase != v1.PodRunning || !readiness {
				unhealthy = append(unhealthy, fmt.Sprintf("%v: deletion %v, phase %v, readiness %v", pet.Name, delTs, phase, readiness))
			}
		}
		return fmt.Errorf("Failed to scale statefulset to %d in %v. Remaining pods:\n%v", count, statefulsetTimeout, unhealthy)
	}
	return nil
}
Code Example #9
File: framework.go  Project: huang195/kubernetes
func (f *Framework) deleteFederationNs() {
	ns := f.FederationNamespace
	By(fmt.Sprintf("Destroying federation namespace %q for this suite.", ns.Name))
	timeout := 5 * time.Minute
	if f.NamespaceDeletionTimeout != 0 {
		timeout = f.NamespaceDeletionTimeout
	}

	clientset := f.FederationClientset_1_4
	// First delete the namespace from federation apiserver.
	if err := clientset.Core().Namespaces().Delete(ns.Name, &api.DeleteOptions{}); err != nil {
		Failf("Error while deleting federation namespace %s: %s", ns.Name, err)
	}
	// Verify that it got deleted.
	err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
		if _, err := clientset.Core().Namespaces().Get(ns.Name); err != nil {
			if apierrs.IsNotFound(err) {
				return true, nil
			}
			Logf("Error while waiting for namespace to be terminated: %v", err)
			return false, nil
		}
		return false, nil
	})
	if err != nil {
		if !apierrs.IsNotFound(err) {
			Failf("Couldn't delete ns %q: %s", ns.Name, err)
		} else {
			Logf("Namespace %v was already deleted", ns.Name)
		}
	}

	// TODO: Delete the namespace from underlying clusters.
}
Code Example #10
// waitForFederatedServiceShard waits until the number of shards of a given federated
// service reaches the expected value (numSvcs) in the given individual Kubernetes
// cluster. If the shard count (numSvcs) is expected to be at least one, it also
// checks that the first shard's name and spec match those of the given service.
func waitForFederatedServiceShard(cs *release_1_3.Clientset, namespace string, service *api.Service, numSvcs int) {
	By("Fetching a federated service shard")
	var clSvcList *v1.ServiceList
	if err := wait.PollImmediate(framework.Poll, FederatedServiceTimeout, func() (bool, error) {
		var err error
		clSvcList, err = cs.Core().Services(namespace).List(api.ListOptions{})
		if err != nil {
			return false, err
		}
		n := len(clSvcList.Items)
		if n == numSvcs {
			return true, nil
		}
		framework.Logf("%d services found, waiting for %d, trying again in %s", n, numSvcs, framework.Poll)
		return false, nil
	}); err != nil {
		framework.Failf("Failed to list registered clusters: %+v", err)
	}

	if numSvcs > 0 && service != nil {
		// Renaming for clarity/readability
		clSvc := clSvcList.Items[0]
		Expect(clSvc.Name).To(Equal(service.Name))
		Expect(clSvc.Spec).To(Equal(service.Spec))
	}
}
Code Example #11
File: ensure.go  Project: golddiga/origin
// ensureComponentAuthorizationRules initializes the cluster policies
func (c *MasterConfig) ensureComponentAuthorizationRules() {
	clusterPolicyRegistry := clusterpolicyregistry.NewRegistry(clusterpolicystorage.NewStorage(c.EtcdHelper))
	ctx := kapi.WithNamespace(kapi.NewContext(), "")

	if _, err := clusterPolicyRegistry.GetClusterPolicy(ctx, authorizationapi.PolicyName); kapierror.IsNotFound(err) {
		glog.Infof("No cluster policy found.  Creating bootstrap policy based on: %v", c.Options.PolicyConfig.BootstrapPolicyFile)

		if err := admin.OverwriteBootstrapPolicy(c.EtcdHelper, c.Options.PolicyConfig.BootstrapPolicyFile, admin.CreateBootstrapPolicyFileFullCommand, true, ioutil.Discard); err != nil {
			glog.Errorf("Error creating bootstrap policy: %v", err)
		}

	} else {
		glog.V(2).Infof("Ignoring bootstrap policy file because cluster policy found")
	}

	// Wait until the policy cache has caught up before continuing
	review := &authorizationapi.SubjectAccessReview{Action: authorizationapi.AuthorizationAttributes{Verb: "get", Resource: "clusterpolicies"}}
	err := wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (done bool, err error) {
		result, err := c.PolicyClient().SubjectAccessReviews().Create(review)
		if err == nil && result.Allowed {
			return true, nil
		}
		if kapierror.IsForbidden(err) || (err == nil && !result.Allowed) {
			glog.V(2).Infof("waiting for policy cache to initialize")
			return false, nil
		}
		return false, err
	})
	if err != nil {
		glog.Errorf("error waiting for policy cache to initialize: %v", err)
	}
}
Code Example #12
File: disruption.go  Project: juanluisvaladas/origin
func waitForPodsOrDie(cs *release_1_4.Clientset, ns string, n int) {
	By("Waiting for all pods to be running")
	err := wait.PollImmediate(framework.Poll, 10*time.Minute, func() (bool, error) {
		selector, err := labels.Parse("foo=bar")
		framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns)
		pods, err := cs.Core().Pods(ns).List(api.ListOptions{LabelSelector: selector})
		if err != nil {
			return false, err
		}
		if pods == nil {
			return false, fmt.Errorf("pods is nil")
		}
		if len(pods.Items) < n {
			framework.Logf("pods: %v < %v", len(pods.Items), n)
			return false, nil
		}
		ready := 0
		for i := 0; i < n; i++ {
			if pods.Items[i].Status.Phase == apiv1.PodRunning {
				ready++
			}
		}
		if ready < n {
			framework.Logf("running pods: %v < %v", ready, n)
			return false, nil
		}
		return true, nil
	})
	framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns)
}
Code Example #13
File: drain.go  Project: nak3/kubernetes
func (o *DrainOptions) waitForDelete(pods []api.Pod, interval, timeout time.Duration, usingEviction bool, getPodFn func(string, string) (*api.Pod, error)) ([]api.Pod, error) {
	var verbStr string
	if usingEviction {
		verbStr = "evicted"
	} else {
		verbStr = "deleted"
	}
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		pendingPods := []api.Pod{}
		for i, pod := range pods {
			p, err := getPodFn(pod.Namespace, pod.Name)
			if apierrors.IsNotFound(err) || (p != nil && p.ObjectMeta.UID != pod.ObjectMeta.UID) {
				cmdutil.PrintSuccess(o.mapper, false, o.out, "pod", pod.Name, false, verbStr)
				continue
			} else if err != nil {
				return false, err
			} else {
				pendingPods = append(pendingPods, pods[i])
			}
		}
		pods = pendingPods
		if len(pendingPods) > 0 {
			return false, nil
		}
		return true, nil
	})
	return pods, err
}
Code Example #14
// GetSelfURL executes a curl against the given path via kubectl exec into a
// test container running with host networking, and fails if the output
// doesn't match the expected string.
func (config *NetworkingTestConfig) GetSelfURL(path string, expected string) {
	cmd := fmt.Sprintf("curl -q -s --connect-timeout 1 http://localhost:10249%s", path)
	By(fmt.Sprintf("Getting kube-proxy self URL %s", path))

	// These are arbitrary timeouts. The curl command should pass on the first try,
	// unless kube-proxy is starved/bootstrapping/restarting etc.
	const retryInterval = 1 * time.Second
	const retryTimeout = 30 * time.Second
	podName := config.HostTestContainerPod.Name
	var msg string
	if pollErr := wait.PollImmediate(retryInterval, retryTimeout, func() (bool, error) {
		stdout, err := RunHostCmd(config.Namespace, podName, cmd)
		if err != nil {
			msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err)
			Logf(msg)
			return false, nil
		}
		if !strings.Contains(stdout, expected) {
			msg = fmt.Sprintf("successfully executed %v in %v/%v, but output '%v' doesn't contain expected string '%v'", cmd, config.Namespace, podName, stdout, expected)
			Logf(msg)
			return false, nil
		}
		return true, nil
	}); pollErr != nil {
		Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName)
		desc, _ := RunKubectl(
			"describe", "pod", podName, fmt.Sprintf("--namespace=%v", config.Namespace))
		Logf("%s", desc)
		Failf("Timed out in %v: %v", retryTimeout, msg)
	}
}
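A hypothetical call, assuming config is an initialized NetworkingTestConfig; the path and expected substring are illustrative rather than taken from a real e2e test:

	// Hypothetical usage: expect the kube-proxy endpoint on localhost:10249 to
	// serve this path with a body containing the given substring.
	config.GetSelfURL("/healthz", "ok")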
Code Example #15
File: utils.go  Project: upmc-enterprises/contrib
// waitForPodCondition waits for a pod to reach a state defined by the given condition function
func waitForPodCondition(kubeClient *unversioned.Client, ns, podName string, condition func(pod *api.Pod) (bool, error),
	interval, timeout time.Duration) error {
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		pod, err := kubeClient.Pods(ns).Get(podName)
		if err != nil {
			if apierrs.IsNotFound(err) {
				// Pod not created yet; keep polling.
				return false, nil
			}
			// Any other error is unexpected; surface it rather than evaluating
			// the condition against an unusable pod object.
			return false, err
		}

		done, err := condition(pod)
		if err != nil {
			return false, err
		}
		if done {
			return true, nil
		}

		return false, nil
	})

	if err != nil {
		return fmt.Errorf("timed out waiting to observe own status as Running")
	}

	return nil
}
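A hypothetical call site for waitForPodCondition, assuming kubeClient is a configured *unversioned.Client; the namespace and pod name are illustrative:

	// Hypothetical usage: poll every 2 seconds, give up after 2 minutes.
	err := waitForPodCondition(kubeClient, "default", "web-0",
		func(pod *api.Pod) (bool, error) {
			return pod.Status.Phase == api.PodRunning, nil
		},
		2*time.Second, 2*time.Minute)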
Code Example #16
File: petset.go  Project: ipbabble/kubernetes
func (p *petTester) restart(ps *apps.PetSet) {
	name := ps.Name
	ns := ps.Namespace
	oldReplicas := ps.Spec.Replicas
	p.update(ns, name, func(ps *apps.PetSet) { ps.Spec.Replicas = 0 })

	var petList *api.PodList
	pollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) {
		petList = p.getPodList(ps)
		if len(petList.Items) == 0 {
			return true, nil
		}
		return false, nil
	})
	if pollErr != nil {
		ts := []string{}
		for _, pet := range petList.Items {
			if pet.DeletionTimestamp != nil {
				ts = append(ts, fmt.Sprintf("%v", pet.DeletionTimestamp.Time))
			}
		}
		framework.Failf("Failed to scale petset down to 0, %d remaining pods with deletion timestamps: %v", len(petList.Items), ts)
	}
	p.update(ns, name, func(ps *apps.PetSet) { ps.Spec.Replicas = oldReplicas })
}
Code Example #17
File: service.go  Project: johnmccawley/origin
func testNotReachable(ip string, port int) {
	url := fmt.Sprintf("http://%s:%d", ip, port)
	if ip == "" {
		Failf("Got empty IP for non-reachability check (%s)", url)
	}
	if port == 0 {
		Failf("Got port==0 for non-reachability check (%s)", url)
	}

	desc := fmt.Sprintf("the url %s to be *not* reachable", url)
	By(fmt.Sprintf("Waiting up to %v for %s", podStartTimeout, desc))
	err := wait.PollImmediate(poll, podStartTimeout, func() (bool, error) {
		resp, err := httpGetNoConnectionPool(url)
		if err != nil {
			Logf("Successfully waited for %s", desc)
			return true, nil
		}
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			Logf("Expecting %s to be unreachable but was reachable and got an error reading response: %v", url, err)
			return false, nil
		}
		Logf("Able to reach service %s when should no longer have been reachable, status: %q and body: %s", url, resp.Status, string(body))
		return false, nil
	})
	Expect(err).NotTo(HaveOccurred(), "Error waiting for %s", desc)
}
Code Example #18
File: test_helper.go  Project: spxtr/kubernetes
// WaitForStoreUpdate ensures a key is in the store before returning (or times out with an error)
func WaitForStoreUpdate(store util.FederatedReadOnlyStore, clusterName, key string, timeout time.Duration) error {
	retryInterval := 100 * time.Millisecond
	err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) {
		_, found, err := store.GetByKey(clusterName, key)
		return found, err
	})
	return err
}
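A hypothetical call site, assuming store is a util.FederatedReadOnlyStore fed by an informer and a cluster registered under the illustrative name "cluster-1":

	// Block until the store has observed the object keyed "default/my-svc" in
	// cluster-1, or fail the test after 30 seconds.
	if err := WaitForStoreUpdate(store, "cluster-1", "default/my-svc", 30*time.Second); err != nil {
		t.Fatalf("store never observed key: %v", err)
	}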
Code Example #19
File: ensure.go  Project: RomainVabre/origin
// ensureComponentAuthorizationRules initializes the cluster policies
func (c *MasterConfig) ensureComponentAuthorizationRules() {
	clusterPolicyRegistry := clusterpolicyregistry.NewRegistry(clusterpolicystorage.NewStorage(c.EtcdHelper))
	ctx := kapi.WithNamespace(kapi.NewContext(), "")

	if _, err := clusterPolicyRegistry.GetClusterPolicy(ctx, authorizationapi.PolicyName); kapierror.IsNotFound(err) {
		glog.Infof("No cluster policy found.  Creating bootstrap policy based on: %v", c.Options.PolicyConfig.BootstrapPolicyFile)

		if err := admin.OverwriteBootstrapPolicy(c.EtcdHelper, c.Options.PolicyConfig.BootstrapPolicyFile, admin.CreateBootstrapPolicyFileFullCommand, true, ioutil.Discard); err != nil {
			glog.Errorf("Error creating bootstrap policy: %v", err)
		}

	} else {
		glog.V(2).Infof("Ignoring bootstrap policy file because cluster policy found")
	}

	// Wait until the policy cache has caught up before continuing
	review := &authorizationapi.SubjectAccessReview{Action: authorizationapi.AuthorizationAttributes{Verb: "get", Group: authorizationapi.GroupName, Resource: "clusterpolicies"}}
	err := wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (done bool, err error) {
		result, err := c.PolicyClient().SubjectAccessReviews().Create(review)
		if err == nil && result.Allowed {
			return true, nil
		}
		if kapierror.IsForbidden(err) || (err == nil && !result.Allowed) {
			glog.V(2).Infof("waiting for policy cache to initialize")
			return false, nil
		}
		return false, err
	})
	if err != nil {
		glog.Errorf("error waiting for policy cache to initialize: %v", err)
	}

	// Reconcile roles that must exist for the cluster to function
	// Be very judicious about what is placed in this list, since it will be enforced on every server start
	reconcileRoles := &policy.ReconcileClusterRolesOptions{
		RolesToReconcile: []string{bootstrappolicy.DiscoveryRoleName},
		Confirmed:        true,
		Union:            true,
		Out:              ioutil.Discard,
		RoleClient:       c.PrivilegedLoopbackOpenShiftClient.ClusterRoles(),
	}
	if err := reconcileRoles.RunReconcileClusterRoles(nil, nil); err != nil {
		glog.Errorf("Could not auto reconcile roles: %v\n", err)
	}

	// Reconcile rolebindings that must exist for the cluster to function
	// Be very judicious about what is placed in this list, since it will be enforced on every server start
	reconcileRoleBindings := &policy.ReconcileClusterRoleBindingsOptions{
		RolesToReconcile:  []string{bootstrappolicy.DiscoveryRoleName},
		Confirmed:         true,
		Union:             true,
		Out:               ioutil.Discard,
		RoleBindingClient: c.PrivilegedLoopbackOpenShiftClient.ClusterRoleBindings(),
	}
	if err := reconcileRoleBindings.RunReconcileClusterRoleBindings(nil, nil); err != nil {
		glog.Errorf("Could not auto reconcile role bindings: %v\n", err)
	}
}
Code Example #20
func testReachableInTime(testFunc wait.ConditionFunc, timeout time.Duration) bool {
	By(fmt.Sprintf("Waiting up to %v", timeout))
	err := wait.PollImmediate(framework.Poll, timeout, testFunc)
	if err != nil {
		Expect(err).NotTo(HaveOccurred(), "Error waiting")
		return false
	}
	return true
}
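A hypothetical condition to pass into the wrapper above; the address is an illustrative placeholder and a TCP dial is just one way to express reachability:

	// Hypothetical usage: treat the endpoint as reachable once a TCP dial succeeds.
	isReachable := func() (bool, error) {
		conn, err := net.DialTimeout("tcp", "203.0.113.10:80", 1*time.Second)
		if err != nil {
			return false, nil // not reachable yet; keep polling
		}
		conn.Close()
		return true, nil
	}
	if !testReachableInTime(isReachable, 2*time.Minute) {
		framework.Failf("endpoint never became reachable")
	}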
Code Example #21
File: scale.go  Project: alex-mohr/kubernetes
// Scale updates a ReplicationController to a new size, with an optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for its replica count to reach the new value
// (if waitForReplicas is not nil).
func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
	if preconditions == nil {
		preconditions = &ScalePrecondition{-1, ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	var updatedResourceVersion string
	cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, &updatedResourceVersion)
	if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
		return err
	}
	if waitForReplicas != nil {
		checkRC := func(rc *api.ReplicationController) bool {
			if uint(rc.Spec.Replicas) != newSize {
				// the size is changed by other party. Don't need to wait for the new change to complete.
				return true
			}
			return rc.Status.ObservedGeneration >= rc.Generation && rc.Status.Replicas == rc.Spec.Replicas
		}
		// If the number of replicas doesn't change, the update may not even
		// be sent to the underlying database (we don't send no-op changes).
		// In that case, <updatedResourceVersion> will hold the value of the most
		// recent update (which may be far in the past), so we may get a "too old
		// RV" error from the watch, or potentially no ReplicationController events
		// will be delivered at all, since it may already be in the expected state.
		// To protect from these two, we first issue Get() to ensure that we
		// are not already in the expected state.
		currentRC, err := scaler.c.ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if !checkRC(currentRC) {
			watchOptions := api.ListOptions{
				FieldSelector:   fields.OneTermEqualSelector("metadata.name", name),
				ResourceVersion: updatedResourceVersion,
			}
			watcher, err := scaler.c.ReplicationControllers(namespace).Watch(watchOptions)
			if err != nil {
				return err
			}
			_, err = watch.Until(waitForReplicas.Timeout, watcher, func(event watch.Event) (bool, error) {
				if event.Type != watch.Added && event.Type != watch.Modified {
					return false, nil
				}
				return checkRC(event.Object.(*api.ReplicationController)), nil
			})
			if err == wait.ErrWaitTimeout {
				return fmt.Errorf("timed out waiting for %q to be synced", name)
			}
			return err
		}
	}
	return nil
}
Code Example #22
File: evictions_test.go  Project: nak3/kubernetes
// waitToObservePods waits for the podInformer to observe the pods. Call this function
// before running the controller under test, to prevent it from creating new pods
// rather than adopting the existing ones.
func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
	if err := wait.PollImmediate(2*time.Second, 60*time.Second, func() (bool, error) {
		objects := podInformer.GetIndexer().List()
		if len(objects) == podNum {
			return true, nil
		}
		return false, nil
	}); err != nil {
		t.Fatal(err)
	}
}
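A hypothetical call site, assuming podInformer is the shared pod informer used by the test and three pods were created directly through the API before the controller starts:

	// Hypothetical usage: block until the informer has indexed the 3 pre-created
	// pods so the controller adopts them instead of creating replacements.
	waitToObservePods(t, podInformer, 3)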
Code Example #23
File: federated-ingress.go  Project: cheld/kubernetes
// waitForFederatedIngressExists waits until the Ingress object exists.
func waitForFederatedIngressExists(c *federation_release_1_4.Clientset, ns, ingName string, timeout time.Duration) error {
	err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
		_, err := c.Extensions().Ingresses(ns).Get(ingName)
		if err != nil {
			framework.Logf("Waiting for Ingress %v, error %v", ingName, err)
			return false, nil
		}
		return true, nil
	})
	return err
}
Code Example #24
File: newapp.go  Project: poomsujarit/origin
func followInstallation(f *clientcmd.Factory, input string, pod *kapi.Pod, kclient kclient.Interface, out io.Writer) error {
	fmt.Fprintf(out, "--> Installing ...\n")

	// we cannot retrieve logs until the pod is out of pending
	// TODO: move this to the server side
	podClient := kclient.Pods(pod.Namespace)
	if err := wait.PollImmediate(500*time.Millisecond, 60*time.Second, installationStarted(podClient, pod.Name, kclient.Secrets(pod.Namespace))); err != nil {
		return err
	}

	mapper, typer := f.Object()
	opts := &kcmd.LogsOptions{
		Namespace:   pod.Namespace,
		ResourceArg: pod.Name,
		Options: &kapi.PodLogOptions{
			Follow:    true,
			Container: pod.Spec.Containers[0].Name,
		},
		Mapper:        mapper,
		Typer:         typer,
		ClientMapper:  resource.ClientMapperFunc(f.ClientForMapping),
		LogsForObject: f.LogsForObject,
		Out:           out,
	}
	_, logErr := opts.RunLogs()

	// status of the pod may take tens of seconds to propagate
	if err := wait.PollImmediate(500*time.Millisecond, 30*time.Second, installationComplete(podClient, pod.Name, out)); err != nil {
		if err == wait.ErrWaitTimeout {
			if logErr != nil {
				// output the log error if one occurred
				err = logErr
			} else {
				err = fmt.Errorf("installation may not have completed, see logs for %q for more information", pod.Name)
			}
		}
		return err
	}

	return nil
}
Code Example #25
File: kubernetes_client.go  Project: spxtr/contrib
func (k *kubernetesClient) CountNodes() (uint64, error) {
	err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
		if k.reflector.LastSyncResourceVersion() == "" {
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		return 0, err
	}
	return uint64(len(k.nodeStore.List())), nil
}
Code Example #26
File: config.go  Project: vjsamuel/kubernetes
func ReadUrl(url string, client *http.Client, header *http.Header) (body []byte, err error) {
	retryInterval := time.Second
	wait.PollImmediate(retryInterval, readURLTimeout, func() (bool, error) {
		body, err = readUrl(url, client, header)
		if err != nil {
			glog.V(4).Infof("Error reading %q: %v", url, err)
			return false, nil
		}
		return true, nil
	})
	return body, err
}
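A hypothetical call site; the URL is an illustrative placeholder. Note that the error returned by PollImmediate itself is discarded above: because body and err are named return values captured by the closure, a timeout surfaces as the last error recorded by readUrl rather than as wait.ErrWaitTimeout.

	// Hypothetical usage: fetch a config payload, retrying until readURLTimeout.
	body, err := ReadUrl("http://config.example/kube-env", http.DefaultClient, nil)
	if err != nil {
		glog.Errorf("could not read config: %v", err)
	}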
Code Example #27
// Wait for ingress status to be updated to match the desiredStatus.
func WaitForStatusUpdate(t *testing.T, store util.FederatedReadOnlyStore, clusterName, key string, desiredStatus api_v1.LoadBalancerStatus, timeout time.Duration) error {
	retryInterval := 100 * time.Millisecond
	err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) {
		obj, found, err := store.GetByKey(clusterName, key)
		if !found || err != nil {
			return false, err
		}
		ingress := obj.(*extensions_v1beta1.Ingress)
		return reflect.DeepEqual(ingress.Status.LoadBalancer, desiredStatus), nil
	})
	return err
}
Code Example #28
File: clusterquota_test.go  Project: Xmagicer/origin
func waitForQuotaLabeling(clusterAdminClient client.AppliedClusterResourceQuotasNamespacer, namespaceName string) error {
	return utilwait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (done bool, err error) {
		list, err := clusterAdminClient.AppliedClusterResourceQuotas(namespaceName).List(kapi.ListOptions{})
		if err != nil {
			return false, nil
		}
		if len(list.Items) > 0 && len(list.Items[0].Status.Total.Hard) > 0 {
			return true, nil
		}
		return false, nil
	})
}
Code Example #29
func waitForLatestCondition(oc *exutil.CLI, name string, timeout time.Duration, fn deploymentConditionFunc) error {
	return wait.PollImmediate(200*time.Millisecond, timeout, func() (bool, error) {
		dc, rcs, pods, err := deploymentInfo(oc, name)
		if err != nil {
			return false, err
		}
		if err := checkDeploymentInvariants(dc, rcs, pods); err != nil {
			return false, err
		}
		return fn(dc, rcs, pods)
	})
}
Code Example #30
// WaitForSecretStoreUpdate waits until the store is updated with the latest secret.
func WaitForSecretStoreUpdate(store util.FederatedReadOnlyStore, clusterName, key string, desiredSecret *api_v1.Secret, timeout time.Duration) error {
	retryInterval := 100 * time.Millisecond
	err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) {
		obj, found, err := store.GetByKey(clusterName, key)
		if !found || err != nil {
			return false, err
		}
		equal := secretsEqual(*obj.(*api_v1.Secret), *desiredSecret)
		return equal, err
	})
	return err
}