Example #1
// WaitForRunningDeployerPod waits up to the given timeout until the deployer pod
// for the given replication controller is running, or has already terminated, so
// that its logs can be retrieved.
func WaitForRunningDeployerPod(podClient kcoreclient.PodsGetter, rc *api.ReplicationController, timeout time.Duration) error {
	podName := DeployerPodNameForDeployment(rc.Name)
	canGetLogs := func(p *api.Pod) bool {
		return api.PodSucceeded == p.Status.Phase || api.PodFailed == p.Status.Phase || api.PodRunning == p.Status.Phase
	}
	pod, err := podClient.Pods(rc.Namespace).Get(podName)
	if err == nil && canGetLogs(pod) {
		return nil
	}
	watcher, err := podClient.Pods(rc.Namespace).Watch(
		api.ListOptions{
			FieldSelector: fields.Set{"metadata.name": podName}.AsSelector(),
		},
	)
	if err != nil {
		return err
	}

	defer watcher.Stop()
	if _, err := watch.Until(timeout, watcher, func(e watch.Event) (bool, error) {
		if e.Type == watch.Error {
			return false, fmt.Errorf("encountered error while watching for pod: %v", e.Object)
		}
		obj, isPod := e.Object.(*api.Pod)
		if !isPod {
			return false, errors.New("received unknown object while watching for pods")
		}
		return canGetLogs(obj), nil
	}); err != nil {
		return err
	}
	return nil
}
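Every example on this page relies on the same contract from the legacy k8s.io/kubernetes/pkg/watch package: watch.Until invokes the condition once per event, stops when the condition returns (true, nil), keeps waiting on (false, nil), and aborts immediately on a non-nil error. A minimal sketch of that contract, assuming the same api and watch packages the example above uses (waitForPodPhase is a hypothetical helper, not part of any example):

func waitForPodPhase(w watch.Interface, phase api.PodPhase, timeout time.Duration) (*api.Pod, error) {
	ev, err := watch.Until(timeout, w, func(e watch.Event) (bool, error) {
		if e.Type == watch.Error {
			// watch.Error events carry a status object, not a pod
			return false, fmt.Errorf("error while watching pod: %v", e.Object)
		}
		pod, ok := e.Object.(*api.Pod)
		if !ok {
			// ignore anything that is not a pod
			return false, nil
		}
		return pod.Status.Phase == phase, nil
	})
	if err != nil {
		return nil, err
	}
	// watch.Until only returns a non-nil event once the condition has succeeded
	return ev.Object.(*api.Pod), nil
}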
Example #2
func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Clientset) {
	w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: quota.Name}))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if _, err := clientset.Core().ResourceQuotas(quota.Namespace).Create(quota); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = watch.Until(1*time.Minute, w, func(event watch.Event) (bool, error) {
		switch event.Type {
		case watch.Modified:
		default:
			return false, nil
		}

		switch cast := event.Object.(type) {
		case *v1.ResourceQuota:
			if len(cast.Status.Hard) > 0 {
				return true, nil
			}
		}

		return false, nil
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
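Note the ordering in this test: the watch is opened before the ResourceQuota is created, so the Modified event in which the controller populates Status.Hard cannot slip in between the Create call and the start of the watch. v1.SingleObject builds ListOptions that field-select on metadata.name, so the watch only delivers events for this one quota.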
Example #3
// Scale updates a ReplicationController to a new size, with optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for its replica count to reach the new value
// (if waitForReplicas is not nil).
func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
	if preconditions == nil {
		preconditions = &ScalePrecondition{-1, ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	cond := ScaleCondition(scaler, preconditions, namespace, name, newSize)
	if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
		return err
	}
	if waitForReplicas != nil {
		watchOptions := api.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", name), ResourceVersion: "0"}
		watcher, err := scaler.c.ReplicationControllers(namespace).Watch(watchOptions)
		if err != nil {
			return err
		}
		_, err = watch.Until(waitForReplicas.Timeout, watcher, func(event watch.Event) (bool, error) {
			if event.Type != watch.Added && event.Type != watch.Modified {
				return false, nil
			}

			rc := event.Object.(*api.ReplicationController)
			return rc.Status.ObservedGeneration >= rc.Generation && rc.Status.Replicas == rc.Spec.Replicas, nil
		})
		if err == wait.ErrWaitTimeout {
			return fmt.Errorf("timed out waiting for %q to be synced", name)
		}
		return err
	}
	return nil
}
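The condition above is the standard two-part convergence test: Status.ObservedGeneration >= Generation confirms the controller has seen this version of the spec, and Status.Replicas == Spec.Replicas confirms it has acted on it. Checking the replica counts alone could return early against a stale status that happens to match the new size.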
Example #4
// WaitForRunningDeployment waits until the specified deployment is no longer New or Pending. Returns true if
// the deployment became running, complete, or failed within timeout, false if it did not, and an error if any
// other error state occurred. The last observed deployment state is returned.
func WaitForRunningDeployment(rn kclient.ReplicationControllersNamespacer, observed *kapi.ReplicationController, timeout time.Duration) (*kapi.ReplicationController, bool, error) {
	fieldSelector := fields.Set{"metadata.name": observed.Name}.AsSelector()
	options := kapi.ListOptions{FieldSelector: fieldSelector, ResourceVersion: observed.ResourceVersion}
	w, err := rn.ReplicationControllers(observed.Namespace).Watch(options)
	if err != nil {
		return observed, false, err
	}
	defer w.Stop()

	if _, err := watch.Until(timeout, w, func(e watch.Event) (bool, error) {
		if e.Type == watch.Error {
			return false, fmt.Errorf("encountered error while watching for replication controller: %v", e.Object)
		}
		obj, isController := e.Object.(*kapi.ReplicationController)
		if !isController {
			return false, fmt.Errorf("received unknown object while watching for deployments: %v", obj)
		}
		observed = obj
		switch deployutil.DeploymentStatusFor(observed) {
		case api.DeploymentStatusRunning, api.DeploymentStatusFailed, api.DeploymentStatusComplete:
			return true, nil
		case api.DeploymentStatusNew, api.DeploymentStatusPending:
			return false, nil
		default:
			return false, ErrUnknownDeploymentPhase
		}
	}); err != nil {
		return observed, false, err
	}

	return observed, true, nil
}
Example #5
func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
	target := int32(100)
	rc := &v1.ReplicationController{
		ObjectMeta: v1.ObjectMeta{
			Name:      "foo",
			Namespace: namespace,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &target,
			Selector: map[string]string{"foo": "bar"},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: v1.ObjectMeta{
					Labels: map[string]string{
						"foo": "bar",
					},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "container",
							Image: "busybox",
						},
					},
				},
			},
		},
	}

	w, err := clientset.Core().ReplicationControllers(namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: rc.Name}))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if _, err := clientset.Core().ReplicationControllers(namespace).Create(rc); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = watch.Until(3*time.Minute, w, func(event watch.Event) (bool, error) {
		switch event.Type {
		case watch.Modified:
		default:
			return false, nil
		}

		switch cast := event.Object.(type) {
		case *v1.ReplicationController:
			fmt.Printf("Found %v of %v replicas\n", int(cast.Status.Replicas), target)
			if cast.Status.Replicas == target {
				return true, nil
			}
		}

		return false, nil
	})
	if err != nil {
		pods, _ := clientset.Core().Pods(namespace).List(v1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()})
		t.Fatalf("unexpected error: %v, ended with %v pods", err, len(pods.Items))
	}
}
Example #6
// Scale updates a ReplicationController to a new size, with optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for its replica count to reach the new value
// (if waitForReplicas is not nil).
func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
	if preconditions == nil {
		preconditions = &ScalePrecondition{-1, ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	var updatedResourceVersion string
	cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, &updatedResourceVersion)
	if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
		return err
	}
	if waitForReplicas != nil {
		checkRC := func(rc *api.ReplicationController) bool {
			if uint(rc.Spec.Replicas) != newSize {
				// the size was changed by another party; don't wait for the new change to complete
				return true
			}
			return rc.Status.ObservedGeneration >= rc.Generation && rc.Status.Replicas == rc.Spec.Replicas
		}
		// If the number of replicas doesn't change, then the update may not even
		// be sent to the underlying database (we don't send no-op changes).
		// In that case, <updatedResourceVersion> will hold the value of the most
		// recent update (which may be far in the past), so we may get a "too old
		// RV" error from the watch, or potentially no ReplicationController events
		// will be delivered, since it may already be in the expected state.
		// To protect against both, we first issue a Get() to ensure that we
		// are not already in the expected state.
		currentRC, err := scaler.c.ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if !checkRC(currentRC) {
			watchOptions := api.ListOptions{
				FieldSelector:   fields.OneTermEqualSelector("metadata.name", name),
				ResourceVersion: updatedResourceVersion,
			}
			watcher, err := scaler.c.ReplicationControllers(namespace).Watch(watchOptions)
			if err != nil {
				return err
			}
			_, err = watch.Until(waitForReplicas.Timeout, watcher, func(event watch.Event) (bool, error) {
				if event.Type != watch.Added && event.Type != watch.Modified {
					return false, nil
				}
				return checkRC(event.Object.(*api.ReplicationController)), nil
			})
			if err == wait.ErrWaitTimeout {
				return fmt.Errorf("timed out waiting for %q to be synced", name)
			}
			return err
		}
	}
	return nil
}
Example #7
// waitForPod watches the given pod until the exitCondition is true. Every two seconds
// the tick function is called, e.g. for progress output.
func waitForPod(podClient coreclient.PodsGetter, ns, name string, exitCondition watch.ConditionFunc, tick func(*api.Pod)) (*api.Pod, error) {
	w, err := podClient.Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: name}))
	if err != nil {
		return nil, err
	}

	pods := make(chan *api.Pod) // observed pods passed to the exitCondition
	defer close(pods)

	// wait for the first event, then start the 2 sec ticker and loop
	go func() {
		pod := <-pods
		if pod == nil {
			return
		}
		tick(pod)

		t := time.NewTicker(2 * time.Second)
		defer t.Stop()

		for {
			select {
			case pod = <-pods:
				if pod == nil {
					return
				}
			case _, ok := <-t.C:
				if !ok {
					return
				}
				tick(pod)
			}
		}
	}()

	intr := interrupt.New(nil, w.Stop)
	var result *api.Pod
	err = intr.Run(func() error {
		ev, err := watch.Until(0, w, func(ev watch.Event) (bool, error) {
			c, err := exitCondition(ev)
			if !c && err == nil {
				pods <- ev.Object.(*api.Pod) // send to ticker
			}
			return c, err
		})
		if ev != nil {
			// guard the assertion: watch.Until returns a nil event when it
			// exits without the condition having been met
			result = ev.Object.(*api.Pod)
		}
		return err
	})
	return result, err
}
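The pods channel in this example doubles as a shutdown signal: waitForPod closes it via the deferred close on return, the ticker goroutine sees the nil receive and exits, and in the meantime tick is re-invoked every two seconds with the most recently observed pod.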
Example #8
func TestBootstrapping(t *testing.T) {
	superUser := "******"

	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.GenericConfig.Authorizer = newRBACAuthorizer(t, superUser, masterConfig)
	masterConfig.GenericConfig.Authenticator = newFakeAuthenticator()
	masterConfig.GenericConfig.AuthorizerRBACSuperUser = superUser
	_, s := framework.RunAMaster(masterConfig)
	defer s.Close()

	clientset := clientset.NewForConfigOrDie(&restclient.Config{BearerToken: superUser, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})

	watcher, err := clientset.Rbac().ClusterRoles().Watch(api.ListOptions{ResourceVersion: "0"})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	_, err = watch.Until(30*time.Second, watcher, func(event watch.Event) (bool, error) {
		if event.Type != watch.Added {
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterRoles, err := clientset.Rbac().ClusterRoles().List(api.ListOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(clusterRoles.Items) == 0 {
		t.Fatalf("missing cluster roles")
	}

	for _, clusterRole := range clusterRoles.Items {
		if clusterRole.Name == "cluster-admin" {
			return
		}
	}

	t.Errorf("missing cluster-admin: %v", clusterRoles)

	healthBytes, err := clientset.Discovery().RESTClient().Get().AbsPath("/healthz/poststarthooks/rbac/bootstrap-roles").DoRaw()
	if err != nil {
		t.Error(err)
	}
	t.Errorf("expected %v, got %v", "asdf", string(healthBytes))
}
Example #9
// waitForToken uses `watch.Until` to wait for the service account controller to fulfill the token request
func waitForToken(token *api.Secret, serviceAccount *api.ServiceAccount, timeout time.Duration, client kcoreclient.SecretInterface) (*api.Secret, error) {
	// there is no provided rounding function, so we use Round(x) === Floor(x + 0.5)
	timeoutSeconds := int64(math.Floor(timeout.Seconds() + 0.5))

	options := api.ListOptions{
		FieldSelector:   fields.SelectorFromSet(fields.Set(map[string]string{"metadata.name": token.Name})),
		Watch:           true,
		ResourceVersion: token.ResourceVersion,
		TimeoutSeconds:  &timeoutSeconds,
	}

	watcher, err := client.Watch(options)
	if err != nil {
		return nil, fmt.Errorf("could not begin watch for token: %v", err)
	}

	event, err := watch.Until(timeout, watcher, func(event watch.Event) (bool, error) {
		if event.Type == watch.Error {
			return false, fmt.Errorf("encountered error while watching for token: %v", event.Object)
		}

		eventToken, ok := event.Object.(*api.Secret)
		if !ok {
			return false, nil
		}

		if eventToken.Name != token.Name {
			return false, nil
		}

		switch event.Type {
		case watch.Modified:
			if serviceaccounts.IsValidServiceAccountToken(serviceAccount, eventToken) {
				return true, nil
			}
		case watch.Deleted:
			return false, errors.New("token was deleted before fulfillment by service account token controller")
		case watch.Added:
			return false, errors.New("unxepected action: token was added after initial creation")
		}
		return false, nil
	})
	if err != nil {
		return nil, err
	}

	return event.Object.(*api.Secret), nil
}
Example #10
// waitForPod watches the given pod until the exitCondition is true
func waitForPod(podClient coreclient.PodsGetter, ns, name string, exitCondition watch.ConditionFunc) (*api.Pod, error) {
	w, err := podClient.Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: name}))
	if err != nil {
		return nil, err
	}

	intr := interrupt.New(nil, w.Stop)
	var result *api.Pod
	err = intr.Run(func() error {
		ev, err := watch.Until(0, w, func(ev watch.Event) (bool, error) {
			return exitCondition(ev)
		})
		if ev != nil {
			// guard the assertion: watch.Until returns a nil event when it
			// exits without the condition having been met
			result = ev.Object.(*api.Pod)
		}
		return err
	})
	return result, err
}
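A hypothetical call site for this helper, reusing the PodCompleted condition that Example #26 passes to watch.Until (the podClient and client names are assumptions, not part of the example):

	// wait until the pod reaches a terminal phase, then inspect it
	pod, err := waitForPod(podClient, "default", "my-pod", client.PodCompleted)
	if err != nil {
		return fmt.Errorf("pod did not complete: %v", err)
	}
	fmt.Printf("pod finished in phase %s\n", pod.Status.Phase)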
Example #11
func TestBootstrapping(t *testing.T) {
	superUser := "******"

	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.Authorizer = newRBACAuthorizer(t, superUser, masterConfig)
	masterConfig.Authenticator = newFakeAuthenticator()
	masterConfig.AuthorizerRBACSuperUser = superUser
	_, s := framework.RunAMaster(masterConfig)
	defer s.Close()

	clientset := clientset.NewForConfigOrDie(&restclient.Config{BearerToken: superUser, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	watcher, err := clientset.Rbac().ClusterRoles().Watch(api.ListOptions{ResourceVersion: "0"})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	_, err = watch.Until(30*time.Second, watcher, func(event watch.Event) (bool, error) {
		if event.Type != watch.Added {
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterRoles, err := clientset.Rbac().ClusterRoles().List(api.ListOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(clusterRoles.Items) == 0 {
		t.Fatalf("missing cluster roles")
	}

	for _, clusterRole := range clusterRoles.Items {
		if clusterRole.Name == "cluster-admin" {
			return
		}
	}

	t.Errorf("missing cluster-admin: %v", clusterRoles)
}
Example #12
// GetFirstPod returns a pod matching the namespace and label selector
// and the number of all pods that match the label selector.
func GetFirstPod(client coreclient.PodsGetter, namespace string, selector labels.Selector, timeout time.Duration, sortBy func([]*v1.Pod) sort.Interface) (*api.Pod, int, error) {
	options := api.ListOptions{LabelSelector: selector}

	podList, err := client.Pods(namespace).List(options)
	if err != nil {
		return nil, 0, err
	}
	pods := []*v1.Pod{}
	for i := range podList.Items {
		pod := podList.Items[i]
		externalPod := &v1.Pod{}
		v1.Convert_api_Pod_To_v1_Pod(&pod, externalPod, nil)
		pods = append(pods, externalPod)
	}
	if len(pods) > 0 {
		sort.Sort(sortBy(pods))
		internalPod := &api.Pod{}
		v1.Convert_v1_Pod_To_api_Pod(pods[0], internalPod, nil)
		return internalPod, len(podList.Items), nil
	}

	// Watch until we observe a pod
	options.ResourceVersion = podList.ResourceVersion
	w, err := client.Pods(namespace).Watch(options)
	if err != nil {
		return nil, 0, err
	}
	defer w.Stop()

	condition := func(event watch.Event) (bool, error) {
		return event.Type == watch.Added || event.Type == watch.Modified, nil
	}
	event, err := watch.Until(timeout, w, condition)
	if err != nil {
		return nil, 0, err
	}
	pod, ok := event.Object.(*api.Pod)
	if !ok {
		return nil, 0, fmt.Errorf("%#v is not a pod event", event)
	}
	return pod, 1, nil
}
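The fallback path sets options.ResourceVersion to the version returned by the List call before opening the watch, so a pod created or modified between the List and the Watch is still delivered as an event rather than silently missed.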
Example #13
func DeleteAndWaitForNamespaceTermination(c *kclient.Client, name string) error {
	w, err := c.Namespaces().Watch(kapi.ListOptions{})
	if err != nil {
		return err
	}
	if err := c.Namespaces().Delete(name); err != nil {
		return err
	}
	_, err = watch.Until(30*time.Second, w, func(event watch.Event) (bool, error) {
		if event.Type != watch.Deleted {
			return false, nil
		}
		namespace, ok := event.Object.(*kapi.Namespace)
		if !ok {
			return false, nil
		}
		return namespace.Name == name, nil
	})
	return err
}
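Because the watch here is opened with empty ListOptions, it streams namespace events for the whole cluster and the condition filters down to the Deleted event for the requested name. Opening the watch before the Delete call ensures that even a very fast termination cannot be missed.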
Example #14
func validateDNSResults(f *e2e.Framework, pod *api.Pod, fileNames sets.String, expect int) {
	By("submitting the pod to kubernetes")
	podClient := f.Client.Pods(f.Namespace.Name)
	defer func() {
		By("deleting the pod")
		defer GinkgoRecover()
		podClient.Delete(pod.Name, api.NewDeleteOptions(0))
	}()
	updated, err := podClient.Create(pod)
	if err != nil {
		e2e.Failf("Failed to create %s pod: %v", pod.Name, err)
	}

	w, err := f.Client.Pods(f.Namespace.Name).Watch(api.SingleObject(api.ObjectMeta{Name: pod.Name, ResourceVersion: updated.ResourceVersion}))
	if err != nil {
		e2e.Failf("Failed: %v", err)
	}
	if _, err = watch.Until(e2e.PodStartTimeout, w, PodSucceeded); err != nil {
		e2e.Failf("Failed: %v", err)
	}

	By("retrieving the pod logs")
	r, err := podClient.GetLogs(pod.Name, &api.PodLogOptions{Container: "querier"}).Stream()
	if err != nil {
		e2e.Failf("Failed to get pod logs %s: %v", pod.Name, err)
	}
	out, err := ioutil.ReadAll(r)
	if err != nil {
		e2e.Failf("Failed to read pod logs %s: %v", pod.Name, err)
	}

	// Try to find results for each expected name.
	By("looking for the results for each expected name from probiers")

	if err := assertLinesExist(fileNames, expect, bytes.NewBuffer(out)); err != nil {
		e2e.Logf("Got results from pod:\n%s", out)
		e2e.Failf("Unexpected results: %v", err)
	}

	e2e.Logf("DNS probes using %s succeeded\n", pod.Name)
}
Example #15
func watchUntilReady(info *resource.Info) error {
	w, err := resource.NewHelper(info.Client, info.Mapping).WatchSingle(info.Namespace, info.Name, info.ResourceVersion)
	if err != nil {
		return err
	}

	kind := info.Mapping.GroupVersionKind.Kind
	log.Printf("Watching for changes to %s %s", kind, info.Name)
	timeout := time.Minute * 5

	// What we watch for depends on the Kind.
	// - For a Job, we watch for completion.
	// - For all else, we watch until Ready.
	// In the future, we might want to add some special logic for types
	// like Ingress, Volume, etc.

	_, err = watch.Until(timeout, w, func(e watch.Event) (bool, error) {
		switch e.Type {
		case watch.Added, watch.Modified:
			// For things like a secret or a config map, this is the best indicator
			// we get. We care mostly about jobs, where what we want to see is
			// the status go into a good state. For other types, like ReplicaSet
			// we don't really do anything to support these as hooks.
			log.Printf("Add/Modify event for %s: %v", info.Name, e.Type)
			if kind == "Job" {
				return waitForJob(e, info.Name)
			}
			return true, nil
		case watch.Deleted:
			log.Printf("Deleted event for %s", info.Name)
			return true, nil
		case watch.Error:
			// Handle error and return with an error.
			log.Printf("Error event for %s", info.Name)
			return true, fmt.Errorf("Failed to deploy %s", info.Name)
		default:
			return false, nil
		}
	})
	return err
}
Example #16
func watchAndWaitForDeploymentConfig(c *oclient.Client, ns string, name string, timeout time.Duration) error {
	if isDeploymentConfigAvailable(c, ns, name) {
		return nil
	}
	w, err := c.DeploymentConfigs(ns).Watch(api.ListOptions{})
	if err != nil {
		return err
	}
	_, err = watch.Until(timeout, w, func(e watch.Event) (bool, error) {
		if e.Type == watch.Error {
			return false, fmt.Errorf("encountered error while watching DeploymentConfigs: %v", e.Object)
		}
		obj, isDC := e.Object.(*deployapi.DeploymentConfig)
		if !isDC {
			return false, fmt.Errorf("received unknown object while watching for DeploymentConfigs: %v", obj)
		}
		deployment := obj
		if deployment.Name == name {
			replicas := deployment.Status.Replicas
			available := deployment.Status.AvailableReplicas
			unavailable := deployment.Status.UnavailableReplicas
			if unavailable == 0 && available > 0 {
				util.Infof("DeploymentConfig %s now has %d available replicas\n", name, available)
				return true, nil
			}
			util.Infof("DeploymentConfig %s has %d replicas, %d available and %d unavailable\n", name, replicas, available, unavailable)
		}
		return false, nil
	})
	if err != nil {
		return err
	}
	return nil
}
Example #17
// RunGet implements the generic Get command
// TODO: convert all direct flag accessors to a struct and pass that instead of cmd
func RunGet(f cmdutil.Factory, out, errOut io.Writer, cmd *cobra.Command, args []string, options *GetOptions) error {
	if len(options.Raw) > 0 {
		restClient, err := f.RESTClient()
		if err != nil {
			return err
		}

		stream, err := restClient.Get().RequestURI(options.Raw).Stream()
		if err != nil {
			return err
		}
		defer stream.Close()

		_, err = io.Copy(out, stream)
		if err != nil && err != io.EOF {
			return err
		}
		return nil
	}

	selector := cmdutil.GetFlagString(cmd, "selector")
	allNamespaces := cmdutil.GetFlagBool(cmd, "all-namespaces")
	showKind := cmdutil.GetFlagBool(cmd, "show-kind")
	mapper, typer, err := f.UnstructuredObject()
	if err != nil {
		return err
	}
	filterFuncs := f.DefaultResourceFilterFunc()
	filterOpts := f.DefaultResourceFilterOptions(cmd, allNamespaces)

	cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	if allNamespaces {
		enforceNamespace = false
	}

	if len(args) == 0 && cmdutil.IsFilenameEmpty(options.Filenames) {
		fmt.Fprint(errOut, "You must specify the type of resource to get. ", valid_resources)

		fullCmdName := cmd.Parent().CommandPath()
		usageString := "Required resource not specified."
		if len(fullCmdName) > 0 && cmdutil.IsSiblingCommandExists(cmd, "explain") {
			usageString = fmt.Sprintf("%s\nUse \"%s explain <resource>\" for a detailed description of that resource (e.g. %[2]s explain pods).", usageString, fullCmdName)
		}

		return cmdutil.UsageError(cmd, usageString)
	}

	// always show resources when getting by name or filename
	argsHasNames, err := resource.HasNames(args)
	if err != nil {
		return err
	}
	if len(options.Filenames) > 0 || argsHasNames {
		cmd.Flag("show-all").Value.Set("true")
	}
	export := cmdutil.GetFlagBool(cmd, "export")

	// handle watch separately since we cannot watch multiple resource types
	isWatch, isWatchOnly := cmdutil.GetFlagBool(cmd, "watch"), cmdutil.GetFlagBool(cmd, "watch-only")
	if isWatch || isWatchOnly {
		r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.UnstructuredClientForMapping), runtime.UnstructuredJSONScheme).
			NamespaceParam(cmdNamespace).DefaultNamespace().AllNamespaces(allNamespaces).
			FilenameParam(enforceNamespace, &options.FilenameOptions).
			SelectorParam(selector).
			ExportParam(export).
			ResourceTypeOrNameArgs(true, args...).
			SingleResourceType().
			Latest().
			Do()
		err := r.Err()
		if err != nil {
			return err
		}
		infos, err := r.Infos()
		if err != nil {
			return err
		}
		if len(infos) != 1 {
			return fmt.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", len(infos))
		}
		info := infos[0]
		mapping := info.ResourceMapping()
		printer, err := f.PrinterForMapping(cmd, mapping, allNamespaces)
		if err != nil {
			return err
		}
		obj, err := r.Object()
		if err != nil {
			return err
		}

		// watching from resourceVersion 0 starts the watch at ~now and
		// will return an initial watch event.  Starting from ~now, rather
		// than the rv of the object, ensures that we start the watch from
		// inside the watch window, which the rv of the object might not be.
		rv := "0"
		isList := meta.IsListType(obj)
		if isList {
			// the resourceVersion of list objects is ~now but won't return
			// an initial watch event
			rv, err = mapping.MetadataAccessor.ResourceVersion(obj)
			if err != nil {
				return err
			}
		}

		// print the current object
		filteredResourceCount := 0
		if !isWatchOnly {
			if err := printer.PrintObj(obj, out); err != nil {
				return fmt.Errorf("unable to output the provided object: %v", err)
			}
			filteredResourceCount++
			cmdutil.PrintFilterCount(filteredResourceCount, mapping.Resource, filterOpts)
		}

		// print watched changes
		w, err := r.Watch(rv)
		if err != nil {
			return err
		}

		first := true
		filteredResourceCount = 0
		intr := interrupt.New(nil, w.Stop)
		intr.Run(func() error {
			_, err := watch.Until(0, w, func(e watch.Event) (bool, error) {
				if !isList && first {
					// drop the initial watch event in the single resource case
					first = false
					return false, nil
				}
				err := printer.PrintObj(e.Object, out)
				if err != nil {
					return false, err
				}
				filteredResourceCount++
				cmdutil.PrintFilterCount(filteredResourceCount, mapping.Resource, filterOpts)
				return false, nil
			})
			return err
		})
		return nil
	}

	r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.UnstructuredClientForMapping), runtime.UnstructuredJSONScheme).
		NamespaceParam(cmdNamespace).DefaultNamespace().AllNamespaces(allNamespaces).
		FilenameParam(enforceNamespace, &options.FilenameOptions).
		SelectorParam(selector).
		ExportParam(export).
		ResourceTypeOrNameArgs(true, args...).
		ContinueOnError().
		Latest().
		Flatten().
		Do()
	err = r.Err()
	if err != nil {
		return err
	}

	printer, generic, err := cmdutil.PrinterForCommand(cmd)
	if err != nil {
		return err
	}

	if generic {
		// we flattened the data from the builder, so we have individual items, but now we'd like to either:
		// 1. if there is more than one item, combine them all into a single list
		// 2. if there is a single item and that item is a list, leave it as its specific list
		// 3. if there is a single item and it is not a list, leave it as a single item
		var errs []error
		singular := false
		infos, err := r.IntoSingular(&singular).Infos()
		if err != nil {
			if singular {
				return err
			}
			errs = append(errs, err)
		}
		if len(infos) == 0 && len(errs) == 0 {
			outputEmptyListWarning(errOut)
		}

		res := ""
		if len(infos) > 0 {
			res = infos[0].ResourceMapping().Resource
		}

		var obj runtime.Object
		if singular {
			obj = infos[0].Object
		} else {
			// we have more than one item, so coerce all items into a list
			list := &runtime.UnstructuredList{
				Object: map[string]interface{}{
					"kind":       "List",
					"apiVersion": "v1",
					"metadata":   map[string]interface{}{},
				},
			}
			for _, info := range infos {
				list.Items = append(list.Items, info.Object.(*runtime.Unstructured))
			}
			obj = list
		}

		isList := meta.IsListType(obj)
		if isList {
			filteredResourceCount, items, err := cmdutil.FilterResourceList(obj, filterFuncs, filterOpts)
			if err != nil {
				return err
			}

			// take the filtered items and create a new list for display
			list := &runtime.UnstructuredList{
				Object: map[string]interface{}{
					"kind":       "List",
					"apiVersion": "v1",
					"metadata":   map[string]interface{}{},
				},
			}
			if listMeta, err := meta.ListAccessor(obj); err == nil {
				list.Object["selfLink"] = listMeta.GetSelfLink()
				list.Object["resourceVersion"] = listMeta.GetResourceVersion()
			}

			for _, item := range items {
				list.Items = append(list.Items, item.(*runtime.Unstructured))
			}
			if err := printer.PrintObj(list, out); err != nil {
				errs = append(errs, err)
			}

			cmdutil.PrintFilterCount(filteredResourceCount, res, filterOpts)
			return utilerrors.Reduce(utilerrors.Flatten(utilerrors.NewAggregate(errs)))
		}

		filteredResourceCount := 0
		if isFiltered, err := filterFuncs.Filter(obj, filterOpts); !isFiltered {
			if err != nil {
				glog.V(2).Infof("Unable to filter resource: %v", err)
			} else if err := printer.PrintObj(obj, out); err != nil {
				errs = append(errs, err)
			}
		} else if isFiltered {
			filteredResourceCount++
		}

		cmdutil.PrintFilterCount(filteredResourceCount, res, filterOpts)
		return utilerrors.Reduce(utilerrors.Flatten(utilerrors.NewAggregate(errs)))
	}

	allErrs := []error{}
	errs := sets.NewString()
	infos, err := r.Infos()
	if err != nil {
		allErrs = append(allErrs, err)
	}
	if len(infos) == 0 && len(allErrs) == 0 {
		outputEmptyListWarning(errOut)
	}

	objs := make([]runtime.Object, len(infos))
	for ix := range infos {
		objs[ix] = infos[ix].Object
	}

	sorting, err := cmd.Flags().GetString("sort-by")
	if err != nil {
		return err
	}
	var sorter *kubectl.RuntimeSort
	if len(sorting) > 0 && len(objs) > 1 {
		// TODO: questionable
		if sorter, err = kubectl.SortObjects(f.Decoder(true), objs, sorting); err != nil {
			return err
		}
	}

	// use the default printer for each object
	printer = nil
	var lastMapping *meta.RESTMapping
	w := kubectl.GetNewTabWriter(out)
	filteredResourceCount := 0

	if resource.MultipleTypesRequested(args) || cmdutil.MustPrintWithKinds(objs, infos, sorter) {
		showKind = true
	}

	for ix := range objs {
		var mapping *meta.RESTMapping
		var original runtime.Object
		if sorter != nil {
			mapping = infos[sorter.OriginalPosition(ix)].Mapping
			original = infos[sorter.OriginalPosition(ix)].Object
		} else {
			mapping = infos[ix].Mapping
			original = infos[ix].Object
		}
		if printer == nil || lastMapping == nil || mapping == nil || mapping.Resource != lastMapping.Resource {
			if printer != nil {
				w.Flush()
				cmdutil.PrintFilterCount(filteredResourceCount, lastMapping.Resource, filterOpts)
			}
			printer, err = f.PrinterForMapping(cmd, mapping, allNamespaces)
			if err != nil {
				if !errs.Has(err.Error()) {
					errs.Insert(err.Error())
					allErrs = append(allErrs, err)
				}
				continue
			}

			// add linebreak between resource groups (if there is more than one)
			// skip linebreak above first resource group
			noHeaders := cmdutil.GetFlagBool(cmd, "no-headers")
			if lastMapping != nil && !noHeaders {
				fmt.Fprintf(errOut, "%s\n", "")
			}

			lastMapping = mapping
		}

		// filter objects if filter has been defined for current object
		if isFiltered, err := filterFuncs.Filter(original, filterOpts); isFiltered {
			if err == nil {
				filteredResourceCount++
				continue
			}
			if !errs.Has(err.Error()) {
				errs.Insert(err.Error())
				allErrs = append(allErrs, err)
			}
		}

		if resourcePrinter, found := printer.(*kubectl.HumanReadablePrinter); found {
			resourceName := resourcePrinter.GetResourceKind()
			if mapping != nil {
				if resourceName == "" {
					resourceName = mapping.Resource
				}
				if alias, ok := kubectl.ResourceShortFormFor(mapping.Resource); ok {
					resourceName = alias
				} else if resourceName == "" {
					resourceName = "none"
				}
			} else {
				resourceName = "none"
			}

			if showKind {
				resourcePrinter.EnsurePrintWithKind(resourceName)
			}

			if err := printer.PrintObj(original, w); err != nil {
				if !errs.Has(err.Error()) {
					errs.Insert(err.Error())
					allErrs = append(allErrs, err)
				}
			}
			continue
		}
		if err := printer.PrintObj(original, w); err != nil {
			if !errs.Has(err.Error()) {
				errs.Insert(err.Error())
				allErrs = append(allErrs, err)
			}
			continue
		}
	}
	w.Flush()
	if printer != nil && lastMapping != nil {
		cmdutil.PrintFilterCount(filteredResourceCount, lastMapping.Resource, filterOpts)
	}
	return utilerrors.NewAggregate(allErrs)
}
Example #18
					Name:            expectedPod.Name,
					ResourceVersion: expectedPod.ResourceVersion,
				},
			))
			Expect(err).NotTo(HaveOccurred())

			By("Verifying the 2nd pod is removed only when it becomes running and ready")
			pst.restoreProbe(ps, testProbe)
			_, err = watch.Until(statefulsetTimeout, watcher, func(event watch.Event) (bool, error) {
				pod := event.Object.(*v1.Pod)
				if event.Type == watch.Deleted && pod.Name == expectedPodName {
					return false, fmt.Errorf("Pod %v was deleted before enter running", pod.Name)
				}
				framework.Logf("Observed event %v for pod %v. Phase %v, Pod is ready %v",
					event.Type, pod.Name, pod.Status.Phase, v1.IsPodReady(pod))
				if pod.Name != expectedPodName {
					return false, nil
				}
				if pod.Status.Phase == v1.PodRunning && v1.IsPodReady(pod) {
					return true, nil
				}
				return false, nil
			})
			Expect(err).NotTo(HaveOccurred())
		})

		It("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", func() {
			psLabels := klabels.Set(labels)
			By("Initializing watcher for selector " + psLabels.String())
			watcher, err := f.ClientSet.Core().Pods(ns).Watch(v1.ListOptions{
				LabelSelector: psLabels.AsSelector().String(),
Example #19
// startMasterOrDie starts a kubernetes master and an HTTP server to handle API requests
func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server) {
	var m *master.Master
	var s *httptest.Server

	if incomingServer != nil {
		s = incomingServer
	} else {
		s = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			m.GenericAPIServer.Handler.ServeHTTP(w, req)
		}))
	}

	if masterConfig == nil {
		masterConfig = NewMasterConfig()
		masterConfig.GenericConfig.EnableProfiling = true
		masterConfig.GenericConfig.EnableSwaggerSupport = true
		masterConfig.GenericConfig.EnableOpenAPISupport = true
		masterConfig.GenericConfig.OpenAPIConfig.Info = &spec.Info{
			InfoProps: spec.InfoProps{
				Title:   "Kubernetes",
				Version: "unversioned",
			},
		}
		masterConfig.GenericConfig.OpenAPIConfig.DefaultResponse = &spec.Response{
			ResponseProps: spec.ResponseProps{
				Description: "Default Response.",
			},
		}
		masterConfig.GenericConfig.OpenAPIConfig.Definitions = openapi.OpenAPIDefinitions
	}

	// set the loopback client config
	if masterConfig.GenericConfig.LoopbackClientConfig == nil {
		masterConfig.GenericConfig.LoopbackClientConfig = &restclient.Config{QPS: 50, Burst: 100, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
	}
	masterConfig.GenericConfig.LoopbackClientConfig.Host = s.URL

	privilegedLoopbackToken := uuid.NewRandom().String()
	// wrap any available authorizer
	tokens := make(map[string]*user.DefaultInfo)
	tokens[privilegedLoopbackToken] = &user.DefaultInfo{
		Name:   user.APIServerUser,
		UID:    uuid.NewRandom().String(),
		Groups: []string{user.SystemPrivilegedGroup},
	}

	tokenAuthenticator := authenticator.NewAuthenticatorFromTokens(tokens)
	if masterConfig.GenericConfig.Authenticator == nil {
		masterConfig.GenericConfig.Authenticator = authenticatorunion.New(tokenAuthenticator, authauthenticator.RequestFunc(alwaysEmpty))
	} else {
		masterConfig.GenericConfig.Authenticator = authenticatorunion.New(tokenAuthenticator, masterConfig.GenericConfig.Authenticator)
	}

	if masterConfig.GenericConfig.Authorizer != nil {
		tokenAuthorizer := authorizer.NewPrivilegedGroups(user.SystemPrivilegedGroup)
		masterConfig.GenericConfig.Authorizer = authorizerunion.New(tokenAuthorizer, masterConfig.GenericConfig.Authorizer)
	} else {
		masterConfig.GenericConfig.Authorizer = alwaysAllow{}
	}

	masterConfig.GenericConfig.LoopbackClientConfig.BearerToken = privilegedLoopbackToken

	m, err := masterConfig.Complete().New()
	if err != nil {
		glog.Fatalf("error in bringing up the master: %v", err)
	}
	if masterReceiver != nil {
		masterReceiver.SetMaster(m)
	}

	cfg := *masterConfig.GenericConfig.LoopbackClientConfig
	cfg.ContentConfig.GroupVersion = &unversioned.GroupVersion{}
	privilegedClient, err := restclient.RESTClientFor(&cfg)
	if err != nil {
		glog.Fatal(err)
	}
	err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		result := privilegedClient.Get().AbsPath("/healthz").Do()
		status := 0
		result.StatusCode(&status)
		if status == 200 {
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		glog.Fatal(err)
	}

	// TODO have this start method actually use the normal start sequence for the API server
	// this method never actually calls the `Run` method for the API server
	// fire the post hooks ourselves
	m.GenericAPIServer.RunPostStartHooks()

	// wait for services to be ready
	if masterConfig.EnableCoreControllers {
		// TODO Once /healthz is updated for posthooks, we'll wait for good health
		coreClient := coreclient.NewForConfigOrDie(&cfg)
		svcWatch, err := coreClient.Services(api.NamespaceDefault).Watch(v1.ListOptions{})
		if err != nil {
			glog.Fatal(err)
		}
		_, err = watch.Until(30*time.Second, svcWatch, func(event watch.Event) (bool, error) {
			if event.Type != watch.Added {
				return false, nil
			}
			if event.Object.(*v1.Service).Name == "kubernetes" {
				return true, nil
			}
			return false, nil
		})
		if err != nil {
			glog.Fatal(err)
		}
	}

	return m, s
}
Example #20
func TestServiceServingCertSigner(t *testing.T) {
	ns := "service-serving-cert-signer"

	testutil.RequireEtcd(t)
	defer testutil.DumpEtcdOnFailure(t)

	_, clusterAdminKubeConfig, err := testserver.StartTestMaster()
	if err != nil {
		t.Fatal(err)
	}
	clusterAdminConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Fatal(err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatal(err)
	}
	clusterAdminKubeClientset, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminConfig, "service-serving-cert-signer", "deads"); err != nil {
		t.Fatal(err)
	}

	service := &kapi.Service{
		ObjectMeta: kapi.ObjectMeta{
			Name: "my-svc",
			Annotations: map[string]string{
				servingcert.ServingCertSecretAnnotation: "my-secret",
			},
		},
		Spec: kapi.ServiceSpec{
			Ports: []kapi.ServicePort{
				{Port: 80},
			},
		},
	}
	actualService, err := clusterAdminKubeClientset.Services(ns).Create(service)
	if err != nil {
		t.Fatal(err)
	}

	var actualFirstSecret *kapi.Secret
	secretWatcher1, err := clusterAdminKubeClientset.Secrets(ns).Watch(kapi.ListOptions{ResourceVersion: actualService.ResourceVersion})
	if err != nil {
		t.Fatal(err)
	}
	_, err = watch.Until(30*time.Second, secretWatcher1, func(event watch.Event) (bool, error) {
		if event.Type != watch.Added {
			return false, nil
		}
		secret := event.Object.(*kapi.Secret)
		if secret.Name == "my-secret" {
			actualFirstSecret = secret
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	secretWatcher1.Stop()

	// now check to make sure that regeneration works.  First, remove the annotation entirely; this simulates
	// the "old data" case where the expiry didn't exist
	delete(actualFirstSecret.Annotations, servingcert.ServingCertExpiryAnnotation)
	actualSecondSecret, err := clusterAdminKubeClientset.Secrets(ns).Update(actualFirstSecret)
	if err != nil {
		t.Fatal(err)
	}

	var actualThirdSecret *kapi.Secret
	secretWatcher2, err := clusterAdminKubeClientset.Secrets(ns).Watch(kapi.ListOptions{ResourceVersion: actualSecondSecret.ResourceVersion})
	if err != nil {
		t.Fatal(err)
	}
	_, err = watch.Until(30*time.Second, secretWatcher2, func(event watch.Event) (bool, error) {
		if event.Type != watch.Modified {
			return false, nil
		}
		secret := event.Object.(*kapi.Secret)
		if secret.Name == "my-secret" {
			actualThirdSecret = secret
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	secretWatcher2.Stop()

	if _, ok := actualThirdSecret.Annotations[servingcert.ServingCertExpiryAnnotation]; !ok {
		t.Fatalf("missing annotation: %#v", actualThirdSecret)
	}
	if reflect.DeepEqual(actualThirdSecret.Data, actualSecondSecret.Data) {
		t.Fatalf("didn't update secret content: %#v", actualThirdSecret)
	}

	// now change the annotation to indicate that we're about to expire.  The controller should regenerate.
	actualThirdSecret.Annotations[servingcert.ServingCertExpiryAnnotation] = time.Now().Add(10 * time.Second).Format(time.RFC3339)
	actualFourthSecret, err := clusterAdminKubeClientset.Secrets(ns).Update(actualThirdSecret)
	if err != nil {
		t.Fatal(err)
	}

	var actualFifthSecret *kapi.Secret
	secretWatcher3, err := clusterAdminKubeClientset.Secrets(ns).Watch(kapi.ListOptions{ResourceVersion: actualFourthSecret.ResourceVersion})
	if err != nil {
		t.Fatal(err)
	}
	_, err = watch.Until(30*time.Second, secretWatcher3, func(event watch.Event) (bool, error) {
		if event.Type != watch.Modified {
			return false, nil
		}
		secret := event.Object.(*kapi.Secret)
		if secret.Name == "my-secret" {
			actualFifthSecret = secret
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	secretWatcher3.Stop()

	if reflect.DeepEqual(actualFourthSecret.Data, actualFifthSecret.Data) {
		t.Fatalf("didn't update secret content: %#v", actualFifthSecret)
	}
}
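Each of the three watches in this test is opened at the resourceVersion of the object returned by the preceding Create or Update, so the Added or Modified event produced by the signing controller is inside the watch window even if it fires before the watch is established.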
Example #21
// RunGet implements the generic Get command
// TODO: convert all direct flag accessors to a struct and pass that instead of cmd
func RunGet(f *cmdutil.Factory, out io.Writer, errOut io.Writer, cmd *cobra.Command, args []string, options *GetOptions) error {
	if len(options.Raw) > 0 {
		restClient, err := f.RESTClient()
		if err != nil {
			return err
		}

		stream, err := restClient.Get().RequestURI(options.Raw).Stream()
		if err != nil {
			return err
		}
		defer stream.Close()

		for {
			buffer := make([]byte, 1024, 1024)
			bytesRead, err := stream.Read(buffer)
			if bytesRead > 0 {
				fmt.Printf("%s", string(buffer[:bytesRead]))
			}
			if err == io.EOF {
				return nil
			}
			if err != nil {
				return err
			}
		}
	}

	selector := cmdutil.GetFlagString(cmd, "selector")
	allNamespaces := cmdutil.GetFlagBool(cmd, "all-namespaces")
	showKind := cmdutil.GetFlagBool(cmd, "show-kind")
	mapper, typer := f.Object()
	printAll := false
	filterFuncs := f.DefaultResourceFilterFunc()
	filterOpts := f.DefaultResourceFilterOptions(cmd, allNamespaces)

	cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	if allNamespaces {
		enforceNamespace = false
	}

	if len(args) == 0 && cmdutil.IsFilenameEmpty(options.Filenames) {
		fmt.Fprint(errOut, "You must specify the type of resource to get. ", valid_resources)

		fullCmdName := cmd.Parent().CommandPath()
		usageString := "Required resource not specified."
		if len(fullCmdName) > 0 && cmdutil.IsSiblingCommandExists(cmd, "explain") {
			usageString = fmt.Sprintf("%s\nUse \"%s explain <resource>\" for a detailed description of that resource (e.g. %[2]s explain pods).", usageString, fullCmdName)
		}

		return cmdutil.UsageError(cmd, usageString)
	}

	// determine if args contains "all"
	for _, a := range args {
		if a == "all" {
			printAll = true
			break
		}
	}

	// always show resources when getting by name or filename
	argsHasNames, err := resource.HasNames(args)
	if err != nil {
		return err
	}
	if len(options.Filenames) > 0 || argsHasNames {
		cmd.Flag("show-all").Value.Set("true")
	}
	export := cmdutil.GetFlagBool(cmd, "export")

	// handle watch separately since we cannot watch multiple resource types
	isWatch, isWatchOnly := cmdutil.GetFlagBool(cmd, "watch"), cmdutil.GetFlagBool(cmd, "watch-only")
	if isWatch || isWatchOnly {
		r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)).
			NamespaceParam(cmdNamespace).DefaultNamespace().AllNamespaces(allNamespaces).
			FilenameParam(enforceNamespace, &options.FilenameOptions).
			SelectorParam(selector).
			ExportParam(export).
			ResourceTypeOrNameArgs(true, args...).
			SingleResourceType().
			Latest().
			Do()
		err := r.Err()
		if err != nil {
			return err
		}
		infos, err := r.Infos()
		if err != nil {
			return err
		}
		if len(infos) != 1 {
			return fmt.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", len(infos))
		}
		info := infos[0]
		mapping := info.ResourceMapping()
		printer, err := f.PrinterForMapping(cmd, mapping, allNamespaces)
		if err != nil {
			return err
		}
		obj, err := r.Object()
		if err != nil {
			return err
		}

		// watching from resourceVersion 0 starts the watch at ~now and
		// will return an initial watch event.  Starting from ~now, rather
		// than the rv of the object, ensures that we start the watch from
		// inside the watch window, which the rv of the object might not be.
		rv := "0"
		isList := meta.IsListType(obj)
		if isList {
			// the resourceVersion of list objects is ~now but won't return
			// an initial watch event
			rv, err = mapping.MetadataAccessor.ResourceVersion(obj)
			if err != nil {
				return err
			}
		}

		// print the current object
		filteredResourceCount := 0
		if !isWatchOnly {
			if err := printer.PrintObj(obj, out); err != nil {
				return fmt.Errorf("unable to output the provided object: %v", err)
			}
			filteredResourceCount++
			cmdutil.PrintFilterCount(filteredResourceCount, mapping.Resource, errOut, filterOpts)
		}

		// print watched changes
		w, err := r.Watch(rv)
		if err != nil {
			return err
		}

		first := true
		filteredResourceCount = 0
		intr := interrupt.New(nil, w.Stop)
		intr.Run(func() error {
			_, err := watch.Until(0, w, func(e watch.Event) (bool, error) {
				if !isList && first {
					// drop the initial watch event in the single resource case
					first = false
					return false, nil
				}
				err := printer.PrintObj(e.Object, out)
				if err != nil {
					return false, err
				}
				filteredResourceCount++
				cmdutil.PrintFilterCount(filteredResourceCount, mapping.Resource, errOut, filterOpts)
				return false, nil
			})
			return err
		})
		return nil
	}

	r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)).
		NamespaceParam(cmdNamespace).DefaultNamespace().AllNamespaces(allNamespaces).
		FilenameParam(enforceNamespace, &options.FilenameOptions).
		SelectorParam(selector).
		ExportParam(export).
		ResourceTypeOrNameArgs(true, args...).
		ContinueOnError().
		Latest().
		Flatten().
		Do()
	err = r.Err()
	if err != nil {
		return err
	}

	printer, generic, err := cmdutil.PrinterForCommand(cmd)
	if err != nil {
		return err
	}

	if generic {
		clientConfig, err := f.ClientConfig()
		if err != nil {
			return err
		}

		// the outermost object will be converted to the output-version, but inner
		// objects can use their mappings
		version, err := cmdutil.OutputVersion(cmd, clientConfig.GroupVersion)
		if err != nil {
			return err
		}

		var errs []error
		singular := false
		infos, err := r.IntoSingular(&singular).Infos()
		if err != nil {
			if singular {
				return err
			}
			errs = append(errs, err)
		}

		res := ""
		if len(infos) > 0 {
			res = infos[0].ResourceMapping().Resource
		}

		obj, err := resource.AsVersionedObject(infos, !singular, version, f.JSONEncoder())
		if err != nil {
			return err
		}

		isList := meta.IsListType(obj)
		if isList {
			filteredResourceCount, items, err := cmdutil.FilterResourceList(obj, filterFuncs, filterOpts)
			if err != nil {
				return err
			}
			filteredObj, err := cmdutil.ObjectListToVersionedObject(items, version)
			if err != nil {
				return err
			}
			if err := printer.PrintObj(filteredObj, out); err != nil {
				errs = append(errs, err)
			}

			cmdutil.PrintFilterCount(filteredResourceCount, res, errOut, filterOpts)
			return utilerrors.Reduce(utilerrors.Flatten(utilerrors.NewAggregate(errs)))
		}

		filteredResourceCount := 0
		if isFiltered, err := filterFuncs.Filter(obj, filterOpts); !isFiltered {
			if err != nil {
				glog.V(2).Infof("Unable to filter resource: %v", err)
			} else if err := printer.PrintObj(obj, out); err != nil {
				errs = append(errs, err)
			}
		} else if isFiltered {
			filteredResourceCount++
		}

		cmdutil.PrintFilterCount(filteredResourceCount, res, errOut, filterOpts)
		return utilerrors.Reduce(utilerrors.Flatten(utilerrors.NewAggregate(errs)))
	}

	allErrs := []error{}
	infos, err := r.Infos()
	if err != nil {
		allErrs = append(allErrs, err)
	}

	objs := make([]runtime.Object, len(infos))
	for ix := range infos {
		objs[ix] = infos[ix].Object
	}

	sorting, err := cmd.Flags().GetString("sort-by")
	if err != nil {
		return err
	}
	var sorter *kubectl.RuntimeSort
	if len(sorting) > 0 && len(objs) > 1 {
		clientConfig, err := f.ClientConfig()
		if err != nil {
			return err
		}

		version, err := cmdutil.OutputVersion(cmd, clientConfig.GroupVersion)
		if err != nil {
			return err
		}

		for ix := range infos {
			objs[ix], err = infos[ix].Mapping.ConvertToVersion(infos[ix].Object, version)
			if err != nil {
				allErrs = append(allErrs, err)
				continue
			}
		}

		// TODO: questionable
		if sorter, err = kubectl.SortObjects(f.Decoder(true), objs, sorting); err != nil {
			return err
		}
	}

	// use the default printer for each object
	printer = nil
	var lastMapping *meta.RESTMapping
	w := kubectl.GetNewTabWriter(out)
	filteredResourceCount := 0

	if cmdutil.MustPrintWithKinds(objs, infos, sorter, printAll) {
		showKind = true
	}

	for ix := range objs {
		var mapping *meta.RESTMapping
		var original runtime.Object
		if sorter != nil {
			mapping = infos[sorter.OriginalPosition(ix)].Mapping
			original = infos[sorter.OriginalPosition(ix)].Object
		} else {
			mapping = infos[ix].Mapping
			original = infos[ix].Object
		}
		if printer == nil || lastMapping == nil || mapping == nil || mapping.Resource != lastMapping.Resource {
			if printer != nil {
				w.Flush()
				cmdutil.PrintFilterCount(filteredResourceCount, lastMapping.Resource, errOut, filterOpts)
			}
			printer, err = f.PrinterForMapping(cmd, mapping, allNamespaces)
			if err != nil {
				allErrs = append(allErrs, err)
				continue
			}

			// add linebreak between resource groups (if there is more than one)
			// skip linebreak above first resource group
			noHeaders := cmdutil.GetFlagBool(cmd, "no-headers")
			if lastMapping != nil && !noHeaders {
				fmt.Fprintf(errOut, "%s\n", "")
			}

			lastMapping = mapping
		}

		// filter objects if filter has been defined for current object
		if isFiltered, err := filterFuncs.Filter(original, filterOpts); isFiltered {
			if err == nil {
				filteredResourceCount++
				continue
			}
			allErrs = append(allErrs, err)
		}

		if resourcePrinter, found := printer.(*kubectl.HumanReadablePrinter); found {
			resourceName := resourcePrinter.GetResourceKind()
			if mapping != nil {
				if resourceName == "" {
					resourceName = mapping.Resource
				}
				if alias, ok := kubectl.ResourceShortFormFor(mapping.Resource); ok {
					resourceName = alias
				} else if resourceName == "" {
					resourceName = "none"
				}
			} else {
				resourceName = "none"
			}

			if showKind {
				resourcePrinter.EnsurePrintWithKind(resourceName)
			}

			if err := printer.PrintObj(original, w); err != nil {
				allErrs = append(allErrs, err)
			}
			continue
		}
		if err := printer.PrintObj(original, w); err != nil {
			allErrs = append(allErrs, err)
			continue
		}
	}
	w.Flush()
	if printer != nil && lastMapping != nil {
		cmdutil.PrintFilterCount(filteredResourceCount, lastMapping.Resource, errOut, filterOpts)
	}
	return utilerrors.NewAggregate(allErrs)
}
			framework.Failf("Pod %v did not start running: %v", podName, err)
		}

		var initialPetPodUID types.UID
		By("waiting until pet pod " + petPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name)
		w, err := f.ClientSet.Core().Pods(f.Namespace.Name).Watch(api.SingleObject(api.ObjectMeta{Name: petPodName}))
		framework.ExpectNoError(err)
		// we need to get the UID from the pod in any state and wait until the pet set controller removes the pod at least once
		_, err = watch.Until(petPodTimeout, w, func(event watch.Event) (bool, error) {
			pod := event.Object.(*api.Pod)
			switch event.Type {
			case watch.Deleted:
				framework.Logf("Observed delete event for pet pod %v in namespace %v", pod.Name, pod.Namespace)
				if initialPetPodUID == "" {
					return false, nil
				}
				return true, nil
			}
			framework.Logf("Observed pet pod in namespace: %v, name: %v, uid: %v, status phase: %v. Waiting for statefulset controller to delete.",
				pod.Namespace, pod.Name, pod.UID, pod.Status.Phase)
			initialPetPodUID = pod.UID
			return false, nil
		})
		if err != nil {
			framework.Failf("Pod %v expected to be re-created atleast once", petPodName)
		}

		By("removing pod with conflicting port in namespace " + f.Namespace.Name)
		err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0))
		framework.ExpectNoError(err)
Example #23
// TODO: check for watch expired error and retry watch from latest point?  Same issue exists for Until.
func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch.ConditionFunc) (*watch.Event, error) {
	if len(conditions) == 0 {
		return nil, nil
	}

	list, err := lw.List(v1.ListOptions{})
	if err != nil {
		return nil, err
	}
	initialItems, err := meta.ExtractList(list)
	if err != nil {
		return nil, err
	}

	// use the initial items as simulated "adds"
	var lastEvent *watch.Event
	currIndex := 0
	passedConditions := 0
	for _, condition := range conditions {
		// check the next condition against the previous event and short circuit waiting for the next watch
		if lastEvent != nil {
			done, err := condition(*lastEvent)
			if err != nil {
				return lastEvent, err
			}
			if done {
				passedConditions = passedConditions + 1
				continue
			}
		}

	ConditionSucceeded:
		for currIndex < len(initialItems) {
			lastEvent = &watch.Event{Type: watch.Added, Object: initialItems[currIndex]}
			currIndex++

			done, err := condition(*lastEvent)
			if err != nil {
				return lastEvent, err
			}
			if done {
				passedConditions = passedConditions + 1
				break ConditionSucceeded
			}
		}
	}
	if passedConditions == len(conditions) {
		return lastEvent, nil
	}
	remainingConditions := conditions[passedConditions:]

	metaObj, err := meta.ListAccessor(list)
	if err != nil {
		return nil, err
	}
	currResourceVersion := metaObj.GetResourceVersion()

	watchInterface, err := lw.Watch(v1.ListOptions{ResourceVersion: currResourceVersion})
	if err != nil {
		return nil, err
	}

	return watch.Until(timeout, watchInterface, remainingConditions...)
}
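The list-then-replay step is what makes ListWatchUntil safe to call when the object may already be in the desired state: the initial list items are replayed as synthetic Added events against the conditions, only the conditions still unmet are handed to watch.Until, and the watch is opened at the list's resourceVersion so no event between the List and the Watch is lost.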
Example #24
func RunStatus(f cmdutil.Factory, cmd *cobra.Command, out io.Writer, args []string, options *resource.FilenameOptions) error {
	if len(args) == 0 && cmdutil.IsFilenameEmpty(options.Filenames) {
		return cmdutil.UsageError(cmd, "Required resource not specified.")
	}

	mapper, typer := f.Object()

	cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)).
		NamespaceParam(cmdNamespace).DefaultNamespace().
		FilenameParam(enforceNamespace, options).
		ResourceTypeOrNameArgs(true, args...).
		SingleResourceType().
		Latest().
		Do()
	err = r.Err()
	if err != nil {
		return err
	}

	infos, err := r.Infos()
	if err != nil {
		return err
	}
	if len(infos) != 1 {
		return fmt.Errorf("rollout status is only supported on individual resources and resource collections - %d resources were found", len(infos))
	}
	info := infos[0]
	mapping := info.ResourceMapping()

	obj, err := r.Object()
	if err != nil {
		return err
	}
	rv, err := mapping.MetadataAccessor.ResourceVersion(obj)
	if err != nil {
		return err
	}

	statusViewer, err := f.StatusViewer(mapping)
	if err != nil {
		return err
	}

	revision := cmdutil.GetFlagInt64(cmd, "revision")
	if revision < 0 {
		return fmt.Errorf("revision must be a positive integer: %v", revision)
	}

	// check if the deployment has finished the rollout
	status, done, err := statusViewer.Status(cmdNamespace, info.Name, revision)
	if err != nil {
		return err
	}
	fmt.Fprintf(out, "%s", status)
	if done {
		return nil
	}

	shouldWatch := cmdutil.GetFlagBool(cmd, "watch")
	if !shouldWatch {
		return nil
	}

	// watch for changes to the deployment
	w, err := r.Watch(rv)
	if err != nil {
		return err
	}

	// if the rollout isn't done yet, keep watching deployment status
	intr := interrupt.New(nil, w.Stop)
	return intr.Run(func() error {
		_, err := watch.Until(0, w, func(e watch.Event) (bool, error) {
			// print deployment's status
			status, done, err := statusViewer.Status(cmdNamespace, info.Name, revision)
			if err != nil {
				return false, err
			}
			fmt.Fprintf(out, "%s", status)
			// Quit waiting if the rollout is done
			if done {
				return true, nil
			}
			return false, nil
		})
		return err
	})
}
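Note the shape of this command: the status is checked once synchronously, and only if the rollout is unfinished does it fall back to watch.Until(0, ...) (no timeout) wrapped in an interrupt handler, so an interrupt stops the watch cleanly instead of leaking it.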
Example #25
// Debug creates and runs a debugging pod.
func (o *DebugOptions) Debug() error {
	pod, originalCommand := o.transformPodForDebug(o.Annotations)
	var commandString string
	switch {
	case len(originalCommand) > 0:
		commandString = strings.Join(originalCommand, " ")
	default:
		commandString = "<image entrypoint>"
	}

	if o.Print != nil {
		return o.Print(pod, o.Attach.Out)
	}

	glog.V(5).Infof("Creating pod: %#v", pod)
	fmt.Fprintf(o.Attach.Err, "Debugging with pod/%s, original command: %s\n", pod.Name, commandString)
	pod, err := o.createPod(pod)
	if err != nil {
		return err
	}

	// ensure the pod is cleaned up on shutdown
	o.Attach.InterruptParent = interrupt.New(
		func(os.Signal) { os.Exit(1) },
		func() {
			stderr := o.Attach.Err
			if stderr == nil {
				stderr = os.Stderr
			}
			fmt.Fprintf(stderr, "\nRemoving debug pod ...\n")
			if err := o.Attach.Client.Pods(pod.Namespace).Delete(pod.Name, kapi.NewDeleteOptions(0)); err != nil {
				if !kapierrors.IsNotFound(err) {
					fmt.Fprintf(stderr, "error: unable to delete the debug pod %q: %v\n", pod.Name, err)
				}
			}
		},
	)

	glog.V(5).Infof("Created attach arguments: %#v", o.Attach)
	return o.Attach.InterruptParent.Run(func() error {
		w, err := o.Attach.Client.Pods(pod.Namespace).Watch(SingleObject(pod.ObjectMeta))
		if err != nil {
			return err
		}
		fmt.Fprintf(o.Attach.Err, "Waiting for pod to start ...\n")
		switch containerRunningEvent, err := watch.Until(o.Timeout, w, kclient.PodContainerRunning(o.Attach.ContainerName)); {
		// api didn't error right away but the pod wasn't even created
		case kapierrors.IsNotFound(err):
			msg := fmt.Sprintf("unable to create the debug pod %q", pod.Name)
			if len(o.NodeName) > 0 {
				msg += fmt.Sprintf(" on node %q", o.NodeName)
			}
			return fmt.Errorf("%s", msg)
		// switch to logging output
		case err == kclient.ErrPodCompleted, !o.Attach.Stdin:
			_, err := kcmd.LogsOptions{
				Object: pod,
				Options: &kapi.PodLogOptions{
					Container: o.Attach.ContainerName,
					Follow:    true,
				},
				Out: o.Attach.Out,

				LogsForObject: o.LogsForObject,
			}.RunLogs()
			return err
		case err != nil:
			return err
		default:
			// TODO this doesn't do us much good for remote debugging sessions, but until we get a local port
			// set up to proxy, this is what we've got.
			if podWithStatus, ok := containerRunningEvent.Object.(*kapi.Pod); ok {
				fmt.Fprintf(o.Attach.Err, "Pod IP: %s\n", podWithStatus.Status.PodIP)
			}

			// TODO: attach can race with pod completion, allow attach to switch to logs
			return o.Attach.Run()
		}
	})
}
Example #26
						Command: []string{"/bin/true"},
					},
				},
			},
		}
		defer podClient.Delete(pod.Name, nil)
		startedPod, err := podClient.Create(pod)
		if err != nil {
			framework.Failf("Error creating a pod: %v", err)
		}
		w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta))
		if err != nil {
			framework.Failf("Error watching a pod: %v", err)
		}
		wr := watch.NewRecorder(w)
		event, err := watch.Until(framework.PodStartTimeout, wr, client.PodCompleted)
		Expect(err).To(BeNil())
		framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
		endPod := event.Object.(*api.Pod)

		Expect(endPod.Status.Phase).To(Equal(api.PodSucceeded))
		_, init := api.GetPodCondition(&endPod.Status, api.PodInitialized)
		Expect(init).NotTo(BeNil())
		Expect(init.Status).To(Equal(api.ConditionTrue))

		Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
		for _, status := range endPod.Status.InitContainerStatuses {
			Expect(status.Ready).To(BeTrue())
			Expect(status.State.Terminated).NotTo(BeNil())
			Expect(status.State.Terminated.ExitCode).To(BeZero())
		}
Example #27
func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) api.PodStatus {
	pod := createPodWithAppArmor(f, profile)
	if shouldRun {
		// The pod needs to start before it stops, so wait for the longer start timeout.
		framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace(
			f.ClientSet, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
	} else {
		// Pod should remain in the pending state. Wait for the Reason to be set to "AppArmor".
		w, err := f.PodClient().Watch(api.SingleObject(api.ObjectMeta{Name: pod.Name}))
		framework.ExpectNoError(err)
		_, err = watch.Until(framework.PodStartTimeout, w, func(e watch.Event) (bool, error) {
			switch e.Type {
			case watch.Deleted:
				return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, pod.Name)
			}
			switch t := e.Object.(type) {
			case *api.Pod:
				if t.Status.Reason == "AppArmor" {
					return true, nil
				}
			}
			return false, nil
		})
		framework.ExpectNoError(err)
	}
	p, err := f.PodClient().Get(pod.Name)
	framework.ExpectNoError(err)
	return p.Status
}