Example #1
func checkDeploymentConfigs(r diagnosticReporter, adapter deploymentConfigAdapter, project string) {
	req, _ := labels.NewRequirement(loggingInfraKey, selection.Exists, nil)
	selector := labels.NewSelector().Add(*req)
	r.Debug("AGL0040", fmt.Sprintf("Checking for DeploymentConfigs in project '%s' with selector '%s'", project, selector))
	dcList, err := adapter.deploymentconfigs(project, kapi.ListOptions{LabelSelector: selector})
	if err != nil {
		r.Error("AGL0045", err, fmt.Sprintf("There was an error while trying to retrieve the DeploymentConfigs in project '%s': %s", project, err))
		return
	}
	if len(dcList.Items) == 0 {
		r.Error("AGL0047", nil, fmt.Sprintf("Did not find any matching DeploymentConfigs in project '%s' which means no logging components were deployed.  Try running the installer.", project))
		return
	}
	found := sets.NewString()
	for _, entry := range dcList.Items {
		comp := labels.Set(entry.ObjectMeta.Labels).Get(componentKey)
		found.Insert(comp)
		r.Debug("AGL0050", fmt.Sprintf("Found DeploymentConfig '%s' for component '%s'", entry.ObjectMeta.Name, comp))
	}
	for _, entry := range loggingComponents.List() {
		exists := found.Has(entry)
		if !exists {
			if strings.HasSuffix(entry, "-ops") {
				r.Info("AGL0060", fmt.Sprintf(deploymentConfigWarnMissingForOps, entry))
			} else {
				r.Error("AGL0065", nil, fmt.Sprintf("Did not find a DeploymentConfig to support component '%s'", entry))
			}
		}
	}
	checkDeploymentConfigPods(r, adapter, *dcList, project)
}
Example #2
// WaitForADeployment waits for a Deployment to fulfill the isOK function
func WaitForADeployment(client kclient.ReplicationControllerInterface,
	name string,
	isOK, isFailed func(*kapi.ReplicationController) bool) error {
	startTime := time.Now()
	endTime := startTime.Add(15 * time.Minute)
	for time.Now().Before(endTime) {
		requirement, err := labels.NewRequirement(deployapi.DeploymentConfigAnnotation, labels.EqualsOperator, sets.NewString(name))
		if err != nil {
			return fmt.Errorf("unexpected error generating label selector: %v", err)
		}

		list, err := client.List(kapi.ListOptions{LabelSelector: labels.NewSelector().Add(*requirement)})
		if err != nil {
			return err
		}
		for i := range list.Items {
			if isOK(&list.Items[i]) {
				return nil
			}
			if isFailed(&list.Items[i]) {
				return fmt.Errorf("The deployment %q status is %q",
					name, list.Items[i].Annotations[deployapi.DeploymentStatusAnnotation])
			}
		}

		rv := list.ResourceVersion
		w, err := client.Watch(kapi.ListOptions{LabelSelector: labels.NewSelector().Add(*requirement), ResourceVersion: rv})
		if err != nil {
			return err
		}
		defer w.Stop()

		for {
			val, ok := <-w.ResultChan()
			if !ok {
				// reget and re-watch
				break
			}
			if e, ok := val.Object.(*kapi.ReplicationController); ok {
				if isOK(e) {
					return nil
				}
				if isFailed(e) {
					return fmt.Errorf("The deployment %q status is %q",
						name, e.Annotations[deployapi.DeploymentStatusAnnotation])
				}
			}
		}
	}
	return fmt.Errorf("the deploy did not finish within 3 minutes")
}
Example #3
// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements
// labels.Selector.
func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.Selector, error) {
	if len(nsm) == 0 {
		return labels.Nothing(), nil
	}
	selector := labels.NewSelector()
	for _, expr := range nsm {
		var op labels.Operator
		switch expr.Operator {
		case NodeSelectorOpIn:
			op = labels.InOperator
		case NodeSelectorOpNotIn:
			op = labels.NotInOperator
		case NodeSelectorOpExists:
			op = labels.ExistsOperator
		case NodeSelectorOpDoesNotExist:
			op = labels.DoesNotExistOperator
		case NodeSelectorOpGt:
			op = labels.GreaterThanOperator
		case NodeSelectorOpLt:
			op = labels.LessThanOperator
		default:
			return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator)
		}
		r, err := labels.NewRequirement(expr.Key, op, sets.NewString(expr.Values...))
		if err != nil {
			return nil, err
		}
		selector = selector.Add(*r)
	}
	return selector, nil
}
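The Gt and Lt branches above deserve a note: the resulting selector compares label values numerically, not lexically. The following is a minimal, self-contained sketch of that behaviour, not taken from the project above; it uses the current k8s.io/apimachinery packages (whose NewRequirement takes a []string rather than the sets.String seen in these older vendored snippets) and a hypothetical "gpu-count" label.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	// Hypothetical label key, for illustration only: "gpu-count" > 2.
	// GreaterThan takes exactly one value, and that value must parse as an integer.
	req, err := labels.NewRequirement("gpu-count", selection.GreaterThan, []string{"2"})
	if err != nil {
		panic(err)
	}
	selector := labels.NewSelector().Add(*req)

	// Matches parses the label value as an integer and compares numerically.
	fmt.Println(selector.Matches(labels.Set{"gpu-count": "4"}))  // true
	fmt.Println(selector.Matches(labels.Set{"gpu-count": "10"})) // true (lexically "10" < "2")
	fmt.Println(selector.Matches(labels.Set{"gpu-count": "1"}))  // false
}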
Example #4
func validateRouter(c *k8sclient.Client, f *cmdutil.Factory) (Result, error) {
	ns, _, err := f.DefaultNamespace()
	if err != nil {
		return Failure, err
	}
	requirement, err := labels.NewRequirement("router", labels.EqualsOperator, sets.NewString("router"))
	if err != nil {
		return Failure, err
	}
	label := labels.NewSelector().Add(*requirement)

	rc, err := c.ReplicationControllers(ns).List(api.ListOptions{LabelSelector: label})
	if err != nil {
		util.Fatalf("Failed to get PersistentVolumeClaims, %s in namespace %s\n", err, ns)
	}
	if rc != nil {
		items := rc.Items
		if len(items) > 0 {
			return Success, err
		}
	}
	//util.Fatalf("No router running in namespace %s\n", ns)
	// TODO lets create a router
	return Failure, err
}
Example #5
// WaitForRegistry waits until a newly deployed registry becomes ready. If waitForDCVersion is given, the
// function will wait until a corresponding replication controller completes. If not given, the latest version of
// the registry's deployment config will be fetched from etcd.
func WaitForRegistry(
	dcNamespacer client.DeploymentConfigsNamespacer,
	kubeClient kclient.Interface,
	waitForDCVersion *int64,
	oc *CLI,
) error {
	var latestVersion int64
	start := time.Now()

	if waitForDCVersion != nil {
		latestVersion = *waitForDCVersion
	} else {
		dc, err := dcNamespacer.DeploymentConfigs(kapi.NamespaceDefault).Get("docker-registry")
		if err != nil {
			return err
		}
		latestVersion = dc.Status.LatestVersion
	}
	fmt.Fprintf(g.GinkgoWriter, "waiting for deployment of version %d to complete\n", latestVersion)

	err := WaitForADeployment(kubeClient.ReplicationControllers(kapi.NamespaceDefault), "docker-registry",
		func(rc *kapi.ReplicationController) bool {
			if !CheckDeploymentCompletedFn(rc) {
				return false
			}
			v, err := strconv.ParseInt(rc.Annotations[deployapi.DeploymentVersionAnnotation], 10, 64)
			if err != nil {
				fmt.Fprintf(g.GinkgoWriter, "failed to parse %q of replication controller %q: %v\n", deployapi.DeploymentVersionAnnotation, rc.Name, err)
				return false
			}
			return v >= latestVersion
		},
		func(rc *kapi.ReplicationController) bool {
			v, err := strconv.ParseInt(rc.Annotations[deployapi.DeploymentVersionAnnotation], 10, 64)
			if err != nil {
				fmt.Fprintf(g.GinkgoWriter, "failed to parse %q of replication controller %q: %v\n", deployapi.DeploymentVersionAnnotation, rc.Name, err)
				return false
			}
			if v < latestVersion {
				return false
			}
			return CheckDeploymentFailedFn(rc)
		}, oc)
	if err != nil {
		return err
	}

	requirement, err := labels.NewRequirement(deployapi.DeploymentLabel, selection.Equals, sets.NewString(fmt.Sprintf("docker-registry-%d", latestVersion)))
	if err != nil {
		return err
	}
	pods, err := WaitForPods(kubeClient.Pods(kapi.NamespaceDefault), labels.NewSelector().Add(*requirement), CheckPodIsReadyFn, 1, time.Minute)
	if err != nil {
		return err
	}
	now := time.Now()
	fmt.Fprintf(g.GinkgoWriter, "deployed registry pod %s after %s\n", pods[0], now.Sub(start).String())
	return nil
}
Example #6
// TestPolicyBindingListRespectingLabels tests that a List() call to the ReadOnlyPolicyBindingCache for some namespace, filtered with a label,
// will return all policyBindings in that namespace matching that label
func TestPolicyBindingListRespectingLabels(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicybindingcache()
	defer close(cacheChannel)

	var policyBindings *authorizationapi.PolicyBindingList
	var err error

	desiredName := "nonUniquePolicyBindingName"
	namespace := "namespaceTwo"
	key := "labelToMatchOn"
	operator := labels.EqualsOperator
	val := sets.NewString("someValue")
	requirement, err := labels.NewRequirement(key, operator, val)
	if err != nil {
		t.Errorf("labels.Selector misconstructed: %v", err)
	}

	label := labels.NewSelector().Add(*requirement)

	util.Until(func() {
		policyBindings, err = testCache.List(&unversioned.ListOptions{LabelSelector: unversioned.LabelSelector{Selector: label}}, namespace)

		if (err == nil) &&
			(policyBindings != nil) &&
			(len(policyBindings.Items) == 1) &&
			(policyBindings.Items[0].Name == desiredName) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policyBindingList using ReadOnlyPolicyBindingCache: %v", err)
	case policyBindings == nil:
		t.Error("PolicyBindingList is nil.")
	case len(policyBindings.Items) != 1:
		t.Errorf("Expected policyBindingList to have 1 item, had %d", len(policyBindings.Items))
	case policyBindings.Items[0].Name != desiredName:
		t.Errorf("Expected policyBinding name to be '%s', was '%s'", desiredName, policyBindings.Items[0].Name)
	}
}
Example #7
// TestClusterPolicyListRespectingLabels tests that a List() call to the ReadOnlyClusterPolicyCache, filtered with a label,
// will return all clusterPolicies matching that label
func TestClusterPolicyListRespectingLabels(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlyclusterpolicycache()
	defer close(cacheChannel)

	var clusterPolicies *authorizationapi.ClusterPolicyList
	var err error

	desiredName := "uniqueClusterPolicyName"
	key := "labelToMatchOn"
	operator := labels.EqualsOperator
	val := sets.NewString("someValue")
	requirement, err := labels.NewRequirement(key, operator, val)
	if err != nil {
		t.Errorf("labels.Selector misconstructed: %v", err)
	}

	label := labels.NewSelector().Add(*requirement)

	utilwait.Until(func() {
		clusterPolicies, err = testCache.List(&kapi.ListOptions{LabelSelector: label})

		if (err == nil) &&
			(clusterPolicies != nil) &&
			(len(clusterPolicies.Items) == 1) &&
			(clusterPolicies.Items[0].Name == desiredName) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting clusterPolicyList with labelSelector using ReadOnlyClusterPolicyCache: %v", err)
	case clusterPolicies == nil:
		t.Error("ClusterPolicyList is nil.")
	case len(clusterPolicies.Items) != 1:
		t.Errorf("Expected clusterPolicyList to contain 2 clusterPolicies, contained %d", len(clusterPolicies.Items))
	case clusterPolicies.Items[0].Name != desiredName:
		t.Errorf("Expected label-selected clusterPolicy name to be '%s', was '%s'", desiredName, clusterPolicies.Items[0].Name)
	}
}
Example #8
// TestClusterPolicyBindingListRespectingLabels tests that a List() call to the ReadOnlyClusterPolicyBindingCache, filtered with a label,
// will return all clusterPolicyBindings matching that label
func TestClusterPolicyBindingListRespectingLabels(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlyclusterpolicybindingcache()
	defer close(cacheChannel)

	var clusterPolicyBindings *authorizationapi.ClusterPolicyBindingList
	var err error

	desiredName := "uniqueClusterPolicyBindingName"
	key := "labelToMatchOn"
	operator := labels.EqualsOperator
	val := sets.NewString("someValue")
	requirement, err := labels.NewRequirement(key, operator, val)
	if err != nil {
		t.Errorf("labels.Selector misconstructed: %v", err)
	}

	label := labels.NewSelector().Add(*requirement)

	util.Until(func() {
		clusterPolicyBindings, err = testCache.List(&unversioned.ListOptions{LabelSelector: unversioned.LabelSelector{Selector: label}})

		if (err == nil) &&
			(clusterPolicyBindings != nil) &&
			(len(clusterPolicyBindings.Items) == 1) &&
			(clusterPolicyBindings.Items[0].Name == desiredName) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting clusterPolicyBinding with labelSelector using ReadOnlyClusterBindingCache: %v", err)
	case clusterPolicyBindings == nil:
		t.Error("ClusterPolicyBindingList using labelSelector is nil")
	case len(clusterPolicyBindings.Items) != 1:
		t.Errorf("Expected clusterPolicyBindingList using labelSelector to contain 1 item, had %d", len(clusterPolicyBindings.Items))
	case clusterPolicyBindings.Items[0].Name != desiredName:
		t.Errorf("Expected clusterPolicyBinding to have name '%s', had '%s'", desiredName, clusterPolicyBindings.Items[0].Name)
	}
}
Example #9
func checkDeploymentConfigPods(r diagnosticReporter, adapter deploymentConfigAdapter, dcs deployapi.DeploymentConfigList, project string) {
	compReq, _ := labels.NewRequirement(componentKey, selection.In, loggingComponents)
	provReq, _ := labels.NewRequirement(providerKey, selection.Equals, sets.NewString(openshiftValue))
	podSelector := labels.NewSelector().Add(*compReq, *provReq)
	r.Debug("AGL0070", fmt.Sprintf("Getting pods that match selector '%s'", podSelector))
	podList, err := adapter.pods(project, kapi.ListOptions{LabelSelector: podSelector})
	if err != nil {
		r.Error("AGL0075", err, fmt.Sprintf("There was an error while trying to retrieve the pods for the AggregatedLogging stack: %s", err))
		return
	}
	if len(podList.Items) == 0 {
		r.Error("AGL0080", nil, fmt.Sprintf(deploymentConfigZeroPodsFound, project))
		return
	}
	dcPodCount := make(map[string]int, len(dcs.Items))
	for _, dc := range dcs.Items {
		dcPodCount[dc.ObjectMeta.Name] = 0
	}

	for _, pod := range podList.Items {
		r.Debug("AGL0082", fmt.Sprintf("Checking status of Pod '%s'...", pod.ObjectMeta.Name))
		dcName, hasDcName := pod.ObjectMeta.Annotations[deployapi.DeploymentConfigAnnotation]
		if !hasDcName {
			r.Warn("AGL0085", nil, fmt.Sprintf("Found Pod '%s' that that does not reference a logging deployment config which may be acceptable. Skipping check to see if its running.", pod.ObjectMeta.Name))
			continue
		}
		if pod.Status.Phase != kapi.PodRunning {
			podName := pod.ObjectMeta.Name
			r.Error("AGL0090", nil, fmt.Sprintf(deploymentConfigPodsNotRunning, podName, dcName, kapi.PodRunning, pod.Status.Phase, project))
		}
		count, _ := dcPodCount[dcName]
		dcPodCount[dcName] = count + 1
	}
	for name, count := range dcPodCount {
		if count == 0 {
			r.Error("AGL0095", nil, fmt.Sprintf(deploymentConfigNoPodsFound, name, project))
		}
	}
}
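Example #9 builds a two-requirement selector (component in the logging set, provider equal to the OpenShift value) and passes it through ListOptions, where it is eventually serialized into the labelSelector query parameter. Below is a minimal standalone sketch of the same pattern against the current k8s.io/apimachinery API; the keys and values ("component", "provider", the es/fluentd/kibana set) are hypothetical stand-ins for the constants used above.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	// component in (es, fluentd, kibana) -- hypothetical stand-in for loggingComponents.
	compReq, err := labels.NewRequirement("component", selection.In, []string{"es", "fluentd", "kibana"})
	if err != nil {
		panic(err)
	}
	// provider = openshift -- hypothetical stand-in for providerKey/openshiftValue.
	provReq, err := labels.NewRequirement("provider", selection.Equals, []string{"openshift"})
	if err != nil {
		panic(err)
	}
	selector := labels.NewSelector().Add(*compReq, *provReq)

	// String() renders the selector in the same form used by the labelSelector query parameter.
	fmt.Println(selector.String()) // component in (es,fluentd,kibana),provider=openshift

	// Matches evaluates a concrete label set against every requirement.
	fmt.Println(selector.Matches(labels.Set{"component": "es", "provider": "openshift"})) // true
	fmt.Println(selector.Matches(labels.Set{"component": "es"}))                          // false
}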
Example #10
// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements
// labels.Selector
func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) {
	if ps == nil {
		return labels.Nothing(), nil
	}
	if len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 {
		return labels.Everything(), nil
	}
	selector := labels.NewSelector()
	for k, v := range ps.MatchLabels {
		r, err := labels.NewRequirement(k, labels.InOperator, sets.NewString(v))
		if err != nil {
			return nil, err
		}
		selector = selector.Add(*r)
	}
	for _, expr := range ps.MatchExpressions {
		var op labels.Operator
		switch expr.Operator {
		case LabelSelectorOpIn:
			op = labels.InOperator
		case LabelSelectorOpNotIn:
			op = labels.NotInOperator
		case LabelSelectorOpExists:
			op = labels.ExistsOperator
		case LabelSelectorOpDoesNotExist:
			op = labels.DoesNotExistOperator
		default:
			return nil, fmt.Errorf("%q is not a valid pod selector operator", expr.Operator)
		}
		r, err := labels.NewRequirement(expr.Key, op, sets.NewString(expr.Values...))
		if err != nil {
			return nil, err
		}
		selector = selector.Add(*r)
	}
	return selector, nil
}
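Example #10 is an older vendored copy of a helper that still exists in current Kubernetes as metav1.LabelSelectorAsSelector (package k8s.io/apimachinery/pkg/apis/meta/v1). The sketch below shows how it might be called, with hypothetical "app"/"tier" labels; since the exact string form of the result depends on the library version, it only checks matching behaviour.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Hypothetical selector combining MatchLabels with a MatchExpressions entry.
	ls := &metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "registry"},
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "tier", Operator: metav1.LabelSelectorOpIn, Values: []string{"frontend", "backend"}},
		},
	}

	selector, err := metav1.LabelSelectorAsSelector(ls)
	if err != nil {
		panic(err)
	}

	fmt.Println(selector.Matches(labels.Set{"app": "registry", "tier": "frontend"})) // true
	fmt.Println(selector.Matches(labels.Set{"app": "registry", "tier": "cache"}))    // false
}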
Example #11
// WaitForADeployment waits for a deployment to fulfill either isOK or isFailed.
// When isOK returns true, WaitForADeployment returns nil, when isFailed returns
// true, WaitForADeployment returns an error including the deployment status.
// WaitForADeployment waits for at most a certain timeout (non-configurable).
func WaitForADeployment(client kclient.ReplicationControllerInterface, name string, isOK, isFailed func(*kapi.ReplicationController) bool, oc *CLI) error {
	timeout := 15 * time.Minute

	// closing done signals that any pending operation should be aborted.
	done := make(chan struct{})
	defer close(done)

	// okOrFailed returns whether a replication controller matches either of
	// the predicates isOK or isFailed, and the associated error in case of
	// failure.
	okOrFailed := func(rc *kapi.ReplicationController) (err error, matched bool) {
		if isOK(rc) {
			return nil, true
		}
		if isFailed(rc) {
			return fmt.Errorf("The deployment %q status is %q", name, rc.Annotations[deployapi.DeploymentStatusAnnotation]), true
		}
		return nil, false
	}

	// waitForDeployment waits until okOrFailed returns true or the done
	// channel is closed.
	waitForDeployment := func() (err error, retry bool) {
		requirement, err := labels.NewRequirement(deployapi.DeploymentConfigAnnotation, selection.Equals, sets.NewString(name))
		if err != nil {
			return fmt.Errorf("unexpected error generating label selector: %v", err), false
		}
		list, err := client.List(kapi.ListOptions{LabelSelector: labels.NewSelector().Add(*requirement)})
		if err != nil {
			return err, false
		}
		// multiple deployments are conceivable; so we look to see how the latest deploy does
		var lastRC *kapi.ReplicationController
		for i := range list.Items {
			// take the address of the slice element rather than of the loop
			// variable, so lastRC does not alias a value that changes each iteration
			rc := &list.Items[i]
			if lastRC == nil {
				lastRC = rc
				continue
			}
			// assuming we won't have to deal with more than 9 deployments
			if lastRC.GetName() <= rc.GetName() {
				lastRC = rc
			}
		}

		if lastRC != nil {
			err, matched := okOrFailed(lastRC)
			if matched {
				return err, false
			}
		}

		w, err := client.Watch(kapi.ListOptions{LabelSelector: labels.NewSelector().Add(*requirement), ResourceVersion: list.ResourceVersion})
		if err != nil {
			return err, false
		}
		defer w.Stop()
		for {
			select {
			case val, ok := <-w.ResultChan():
				if !ok {
					// watcher error, re-get and re-watch
					return nil, true
				}
				if rc, ok := val.Object.(*kapi.ReplicationController); ok {
					if lastRC == nil {
						lastRC = rc
					}
					// multiple deployments are conceivable; so we look to see how the latest deploy does
					if lastRC.GetName() <= rc.GetName() {
						lastRC = rc
						err, matched := okOrFailed(rc)
						if matched {
							return err, false
						}
					}
				}
			case <-done:
				// no more time left, stop what we were doing,
				// do no retry.
				return nil, false
			}
		}
	}

	// errCh is buffered so the goroutine below never blocks on sending,
	// preventing a goroutine leak if we reach the timeout.
	errCh := make(chan error, 1)

	go func() {
		defer close(errCh)
		err, retry := waitForDeployment()
		for retry {
			err, retry = waitForDeployment()
		}
		errCh <- err
	}()

	select {
	case err := <-errCh:
		if err != nil {
			DumpDeploymentLogs(name, oc)
		}
		return err
	case <-time.After(timeout):
		DumpDeploymentLogs(name, oc)
		// e.g. due to timing issues where we miss watch updates
		return fmt.Errorf("timed out waiting for deployment %q after %v", name, timeout)
	}
}