// Tears down as much of a pod's network as it can even if errors occur.  Returns
// an aggregate error composed of all errors encountered during the teardown.
func (plugin *kubenetNetworkPlugin) teardown(namespace string, name string, id kubecontainer.ContainerID, podIP string) error {
	errList := []error{}

	if podIP != "" {
		glog.V(5).Infof("Removing pod IP %s from shaper", podIP)
		// shaper wants /32
		if err := plugin.shaper().Reset(fmt.Sprintf("%s/32", podIP)); err != nil {
			// Bandwidth shaping may simply not have been enabled for this pod.
			glog.V(4).Infof("Failed to remove pod IP %s from shaper: %v", podIP, err)
		}

		delete(plugin.podIPs, id)
	}

	if err := plugin.delContainerFromNetwork(plugin.netConfig, network.DefaultInterfaceName, namespace, name, id); err != nil {
		// This is to prevent returning error when TearDownPod is called twice on the same pod. This helps to reduce event pollution.
		if podIP != "" {
			glog.Warningf("Failed to delete container from kubenet: %v", err)
		} else {
			errList = append(errList, err)
		}
	}

	runningPods, err := plugin.getRunningPods()
	if err == nil {
		err = plugin.hostportHandler.SyncHostports(BridgeName, runningPods)
	}
	if err != nil {
		errList = append(errList, err)
	}

	return utilerrors.NewAggregate(errList)
}
func ensureProcessInContainer(pid int, oomScoreAdj int, manager *fs.Manager) error {
	if runningInHost, err := isProcessRunningInHost(pid); err != nil {
		// Err on the side of caution. Avoid moving the docker daemon unless we are able to identify its context.
		return err
	} else if !runningInHost {
		// Process is running inside a container. Don't touch that.
		return nil
	}

	var errs []error
	cont, err := getContainer(pid)
	if err != nil {
		errs = append(errs, fmt.Errorf("failed to find container of PID %d: %v", pid, err))
	}

	if cont != manager.Cgroups.Name {
		err = manager.Apply(pid)
		if err != nil {
			errs = append(errs, fmt.Errorf("failed to move PID %d (in %q) to %q", pid, cont, manager.Cgroups.Name))
		}
	}

	// Also apply oom-score-adj to the process
	oomAdjuster := oom.NewOOMAdjuster()
	if err := oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err != nil {
		errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d", oomScoreAdj, pid))
	}
	return utilerrors.NewAggregate(errs)
}
// setupKernelTunables validates that kernel tunable flags are set as expected.
// Depending on the specified option, it will either warn, error, or modify the flags.
func setupKernelTunables(option KernelTunableBehavior) error {
	desiredState := map[string]int{
		utilsysctl.VmOvercommitMemory: utilsysctl.VmOvercommitMemoryAlways,
		utilsysctl.VmPanicOnOOM:       utilsysctl.VmPanicOnOOMInvokeOOMKiller,
		utilsysctl.KernelPanic:        utilsysctl.KernelPanicRebootTimeout,
		utilsysctl.KernelPanicOnOops:  utilsysctl.KernelPanicOnOopsAlways,
	}

	errList := []error{}
	for flag, expectedValue := range desiredState {
		val, err := utilsysctl.GetSysctl(flag)
		if err != nil {
			errList = append(errList, err)
			continue
		}
		if val == expectedValue {
			continue
		}

		switch option {
		case KernelTunableError:
			errList = append(errList, fmt.Errorf("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val))
		case KernelTunableWarn:
			glog.V(2).Infof("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
		case KernelTunableModify:
			glog.V(2).Infof("Updating kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
			err = utilsysctl.SetSysctl(flag, expectedValue)
			if err != nil {
				errList = append(errList, err)
			}
		}
	}
	return utilerrors.NewAggregate(errList)
}
Example #4
// ToAggregate converts the ErrorList into an errors.Aggregate.
func (list ErrorList) ToAggregate() utilerrors.Aggregate {
	errs := make([]error, len(list))
	for i := range list {
		errs[i] = list[i]
	}
	return utilerrors.NewAggregate(errs)
}
func NewCmdRolloutPause(f *cmdutil.Factory, out io.Writer) *cobra.Command {
	opts := &PauseConfig{}

	cmd := &cobra.Command{
		Use:     "pause RESOURCE",
		Short:   "Mark the provided resource as paused",
		Long:    pause_long,
		Example: pause_example,
		Run: func(cmd *cobra.Command, args []string) {
			allErrs := []error{}
			err := opts.CompletePause(f, cmd, out, args)
			if err != nil {
				allErrs = append(allErrs, err)
			}
			err = opts.RunPause()
			if err != nil {
				allErrs = append(allErrs, err)
			}
			cmdutil.CheckErr(utilerrors.Flatten(utilerrors.NewAggregate(allErrs)))
		},
	}

	usage := "Filename, directory, or URL to a file identifying the resource to get from a server."
	kubectl.AddJsonFilenameFlag(cmd, &opts.Filenames, usage)
	cmdutil.AddRecursiveFlag(cmd, &opts.Recursive)
	return cmd
}
// InstallREST registers the REST handlers (storage, watch, proxy and redirect) into a restful Container.
// It is expected that the provided path root prefix will serve all operations. Root MUST NOT end
// in a slash.
func (g *APIGroupVersion) InstallREST(container *restful.Container) error {
	installer := g.newInstaller()
	ws := installer.NewWebService()
	apiResources, registrationErrors := installer.Install(ws)
	AddSupportedResourcesWebService(g.Serializer, ws, g.GroupVersion, apiResources)
	container.Add(ws)
	return utilerrors.NewAggregate(registrationErrors)
}
Example #7
// NewForbidden is a utility function to return a well-formatted admission control error response
func NewForbidden(a Attributes, internalError error) error {
	// do not double-wrap an error of the same type
	if apierrors.IsForbidden(internalError) {
		return internalError
	}
	name, resource, err := extractResourceName(a)
	if err != nil {
		return apierrors.NewInternalError(utilerrors.NewAggregate([]error{internalError, err}))
	}
	return apierrors.NewForbidden(resource, name, internalError)
}
Example #8
// Authorize checks the request against a chain of authorizer.Authorizer objects, returning nil on the
// first success and an aggregate of all denial errors if every authorizer rejects the request.
func (authzHandler unionAuthzHandler) Authorize(a authorizer.Attributes) error {
	var errlist []error
	for _, currAuthzHandler := range authzHandler {
		err := currAuthzHandler.Authorize(a)
		if err != nil {
			errlist = append(errlist, err)
			continue
		}
		return nil
	}

	return utilerrors.NewAggregate(errlist)
}
Example #9
// EncodeList ensures that each object in an array is converted to an Unknown{} in serialized form.
// TODO: accept a content type.
func EncodeList(e Encoder, objects []Object) error {
	var errs []error
	for i := range objects {
		data, err := Encode(e, objects[i])
		if err != nil {
			errs = append(errs, err)
			continue
		}
		// TODO: Set ContentEncoding and ContentType.
		objects[i] = &Unknown{Raw: data}
	}
	return errors.NewAggregate(errs)
}
Example #10
func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
	deployments := reaper.Extensions().Deployments(namespace)
	replicaSets := reaper.Extensions().ReplicaSets(namespace)
	rsReaper, _ := ReaperFor(extensions.Kind("ReplicaSet"), reaper)

	deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d *extensions.Deployment) {
		// set deployment's history and scale to 0
		// TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527
		d.Spec.RevisionHistoryLimit = util.Int32Ptr(0)
		d.Spec.Replicas = 0
		d.Spec.Paused = true
	})
	if err != nil {
		return err
	}

	// Use observedGeneration to determine if the deployment controller noticed the pause.
	if err := deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) {
		return deployments.Get(name)
	}, deployment.Generation, 1*time.Second, 1*time.Minute); err != nil {
		return err
	}

	// Stop all replica sets.
	selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		return err
	}

	options := api.ListOptions{LabelSelector: selector}
	rsList, err := replicaSets.List(options)
	if err != nil {
		return err
	}
	errList := []error{}
	for _, rs := range rsList.Items {
		if err := rsReaper.Stop(rs.Namespace, rs.Name, timeout, gracePeriod); err != nil {
			scaleGetErr, ok := err.(*ScaleError)
			// Tolerate NotFound errors, whether returned directly or wrapped in a ScaleError.
			if !errors.IsNotFound(err) || ok && !errors.IsNotFound(scaleGetErr.ActualError) {
				errList = append(errList, err)
			}
		}
	}
	if len(errList) > 0 {
		return utilerrors.NewAggregate(errList)
	}

	// Delete deployment at the end.
	// Note: We delete the deployment last so that if removing the RSs fails, we at least still have the deployment to retry.
	return deployments.Delete(name, nil)
}
func (o PauseConfig) RunPause() error {
	allErrs := []error{}
	for _, info := range o.Infos {
		isAlreadyPaused, err := o.PauseObject(info.Object)
		if err != nil {
			allErrs = append(allErrs, cmdutil.AddSourceToErr("pausing", info.Source, err))
			continue
		}
		if isAlreadyPaused {
			cmdutil.PrintSuccess(o.Mapper, false, o.Out, info.Mapping.Resource, info.Name, "already paused")
			continue
		}
		cmdutil.PrintSuccess(o.Mapper, false, o.Out, info.Mapping.Resource, info.Name, "paused")
	}
	return utilerrors.NewAggregate(allErrs)
}
// Ensures the system container is created and that all non-kernel processes without a
// container (including process 1) are moved into it.
//
// Kernel threads are left in the root cgroup because we don't want to tie their execution
// to a to-be-defined /system quota and risk priority inversions.
//
func ensureSystemCgroups(rootContainer *fs.Manager, manager *fs.Manager) error {
	// Move non-kernel PIDs to the system container.
	attemptsRemaining := 10
	var errs []error
	for attemptsRemaining >= 0 {
		// Only keep errors on latest attempt.
		errs = []error{}
		attemptsRemaining--

		allPids, err := rootContainer.GetPids()
		if err != nil {
			errs = append(errs, fmt.Errorf("failed to list PIDs for root: %v", err))
			continue
		}

		// Remove kernel pids and other protected PIDs (pid 1, PIDs already in system & kubelet containers)
		pids := make([]int, 0, len(allPids))
		for _, pid := range allPids {
			if pid == 1 || isKernelPid(pid) {
				continue
			}

			pids = append(pids, pid)
		}
		glog.Infof("Found %d PIDs in root, %d of them are not to be moved", len(allPids), len(allPids)-len(pids))

		// Check if we have moved all the non-kernel PIDs.
		if len(pids) == 0 {
			break
		}

		glog.Infof("Moving non-kernel processes: %v", pids)
		for _, pid := range pids {
			err := manager.Apply(pid)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to move PID %d into the system container %q: %v", pid, manager.Cgroups.Name, err))
			}
		}

	}
	if attemptsRemaining < 0 {
		errs = append(errs, fmt.Errorf("ran out of attempts to create system containers %q", manager.Cgroups.Name))
	}

	return utilerrors.NewAggregate(errs)
}
Example #13
func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
	jobs := reaper.Batch().Jobs(namespace)
	pods := reaper.Pods(namespace)
	scaler, err := ScalerFor(batch.Kind("Job"), *reaper)
	if err != nil {
		return err
	}
	job, err := jobs.Get(name)
	if err != nil {
		return err
	}
	if timeout == 0 {
		// we will never have more active pods than job.Spec.Parallelism
		parallelism := *job.Spec.Parallelism
		timeout = Timeout + time.Duration(10*parallelism)*time.Second
	}

	// TODO: handle overlapping jobs
	retry := NewRetryParams(reaper.pollInterval, reaper.timeout)
	waitForJobs := NewRetryParams(reaper.pollInterval, timeout)
	if err = scaler.Scale(namespace, name, 0, nil, retry, waitForJobs); err != nil {
		return err
	}
	// at this point only dead pods are left, and they should be removed
	selector, _ := unversioned.LabelSelectorAsSelector(job.Spec.Selector)
	options := api.ListOptions{LabelSelector: selector}
	podList, err := pods.List(options)
	if err != nil {
		return err
	}
	errList := []error{}
	for _, pod := range podList.Items {
		if err := pods.Delete(pod.Name, gracePeriod); err != nil {
			// ignore the error when the pod isn't found
			if !errors.IsNotFound(err) {
				errList = append(errList, err)
			}
		}
	}
	if len(errList) > 0 {
		return utilerrors.NewAggregate(errList)
	}
	// once we have all the pods removed we can safely remove the job itself
	return jobs.Delete(name, nil)
}
Example #14
// UpdateREST registers the REST handlers for this APIGroupVersion to an existing web service
// in the restful Container.  It will use the prefix (root/version) to find the existing
// web service.  If a web service does not exist within the container to support the prefix
// this method will return an error.
func (g *APIGroupVersion) UpdateREST(container *restful.Container) error {
	installer := g.newInstaller()
	var ws *restful.WebService

	for i, s := range container.RegisteredWebServices() {
		if s.RootPath() == installer.prefix {
			ws = container.RegisteredWebServices()[i]
			break
		}
	}

	if ws == nil {
		return apierrors.NewInternalError(fmt.Errorf("unable to find an existing webservice for prefix %s", installer.prefix))
	}
	apiResources, registrationErrors := installer.Install(ws)
	AddSupportedResourcesWebService(g.Serializer, ws, g.GroupVersion, apiResources)
	return utilerrors.NewAggregate(registrationErrors)
}
Example #15
func (f *simpleStrategyFactory) CreateStrategies(psp *extensions.PodSecurityPolicy, namespace string) (*ProviderStrategies, error) {
	errs := []error{}

	userStrat, err := createUserStrategy(&psp.Spec.RunAsUser)
	if err != nil {
		errs = append(errs, err)
	}

	seLinuxStrat, err := createSELinuxStrategy(&psp.Spec.SELinux)
	if err != nil {
		errs = append(errs, err)
	}

	fsGroupStrat, err := createFSGroupStrategy(&psp.Spec.FSGroup)
	if err != nil {
		errs = append(errs, err)
	}

	supGroupStrat, err := createSupplementalGroupStrategy(&psp.Spec.SupplementalGroups)
	if err != nil {
		errs = append(errs, err)
	}

	capStrat, err := createCapabilitiesStrategy(psp.Spec.DefaultAddCapabilities, psp.Spec.RequiredDropCapabilities, psp.Spec.AllowedCapabilities)
	if err != nil {
		errs = append(errs, err)
	}

	if len(errs) > 0 {
		return nil, errors.NewAggregate(errs)
	}

	strategies := &ProviderStrategies{
		RunAsUserStrategy:         userStrat,
		SELinuxStrategy:           seLinuxStrat,
		FSGroupStrategy:           fsGroupStrat,
		SupplementalGroupStrategy: supGroupStrat,
		CapabilitiesStrategy:      capStrat,
	}

	return strategies, nil
}
Example #16
// InitNetworkPlugin inits the plugin that matches networkPluginName. Plugins must have unique names.
func InitNetworkPlugin(plugins []NetworkPlugin, networkPluginName string, host Host, hairpinMode componentconfig.HairpinMode, nonMasqueradeCIDR string) (NetworkPlugin, error) {
	if networkPluginName == "" {
		// default to the no_op plugin
		plug := &NoopNetworkPlugin{}
		if err := plug.Init(host, hairpinMode, nonMasqueradeCIDR); err != nil {
			return nil, err
		}
		return plug, nil
	}

	pluginMap := map[string]NetworkPlugin{}

	allErrs := []error{}
	for _, plugin := range plugins {
		name := plugin.Name()
		if errs := validation.IsQualifiedName(name); len(errs) != 0 {
			allErrs = append(allErrs, fmt.Errorf("network plugin has invalid name: %q: %s", name, strings.Join(errs, ";")))
			continue
		}

		if _, found := pluginMap[name]; found {
			allErrs = append(allErrs, fmt.Errorf("network plugin %q was registered more than once", name))
			continue
		}
		pluginMap[name] = plugin
	}

	chosenPlugin := pluginMap[networkPluginName]
	if chosenPlugin != nil {
		err := chosenPlugin.Init(host, hairpinMode, nonMasqueradeCIDR)
		if err != nil {
			allErrs = append(allErrs, fmt.Errorf("Network plugin %q failed init: %v", networkPluginName, err))
		} else {
			glog.V(1).Infof("Loaded network plugin %q", networkPluginName)
		}
	} else {
		allErrs = append(allErrs, fmt.Errorf("Network plugin %q not found.", networkPluginName))
	}

	return chosenPlugin, utilerrors.NewAggregate(allErrs)
}
// Ensures that the Docker daemon is in the desired container.
func ensureDockerInContainer(dockerVersion semver.Version, oomScoreAdj int, manager *fs.Manager) error {
	type process struct{ name, file string }
	dockerProcs := []process{{dockerProcessName, dockerPidFile}}
	if dockerVersion.GTE(containerdVersion) {
		dockerProcs = append(dockerProcs, process{containerdProcessName, containerdPidFile})
	}

	var errs []error
	for _, proc := range dockerProcs {
		pids, err := getPidsForProcess(proc.name, proc.file)
		if err != nil {
			errs = append(errs, fmt.Errorf("failed to get pids for %q: %v", proc.name, err))
			continue
		}

		// Move if the pid is not already in the desired container.
		for _, pid := range pids {
			if err := ensureProcessInContainer(pid, oomScoreAdj, manager); err != nil {
				errs = append(errs, fmt.Errorf("errors moving %q pid: %v", proc.name, err))
			}
		}
	}
	return utilerrors.NewAggregate(errs)
}
Example #18
func (o *ImageOptions) Run() error {
	allErrs := []error{}

	patches := CalculatePatches(o.Infos, o.Encoder, func(info *resource.Info) (bool, error) {
		transformed := false
		_, err := o.UpdatePodSpecForObject(info.Object, func(spec *api.PodSpec) error {
			for name, image := range o.ContainerImages {
				containerFound := false
				// Find the container to update, and update its image
				for i, c := range spec.Containers {
					if c.Name == name || name == "*" {
						spec.Containers[i].Image = image
						containerFound = true
						// Mark that an update was performed
						transformed = true
					}
				}
				// Record an error if no matching container was found
				if !containerFound {
					allErrs = append(allErrs, fmt.Errorf("unable to find container named %q", name))
				}
			}
			return nil
		})
		return transformed, err
	})

	for _, patch := range patches {
		info := patch.Info
		if patch.Err != nil {
			allErrs = append(allErrs, fmt.Errorf("error: %s/%s %v\n", info.Mapping.Resource, info.Name, patch.Err))
			continue
		}

		// no changes
		if string(patch.Patch) == "{}" || len(patch.Patch) == 0 {
			continue
		}

		if o.Local {
			fmt.Fprintln(o.Out, "running in local mode...")
			return o.PrintObject(o.Cmd, o.Mapper, info.Object, o.Out)
		}

		// patch the change
		obj, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, api.StrategicMergePatchType, patch.Patch)
		if err != nil {
			allErrs = append(allErrs, fmt.Errorf("failed to patch image update to pod template: %v\n", err))
			continue
		}
		info.Refresh(obj, true)

		// record this change (for rollout history)
		if o.Record || cmdutil.ContainsChangeCause(info) {
			if err := cmdutil.RecordChangeCause(obj, o.ChangeCause); err == nil {
				if obj, err = resource.NewHelper(info.Client, info.Mapping).Replace(info.Namespace, info.Name, false, obj); err != nil {
					allErrs = append(allErrs, fmt.Errorf("changes to %s/%s can't be recorded: %v\n", info.Mapping.Resource, info.Name, err))
				}
			}
		}

		info.Refresh(obj, true)
		cmdutil.PrintSuccess(o.Mapper, o.ShortOutput, o.Out, info.Mapping.Resource, info.Name, "image updated")
	}
	return utilerrors.NewAggregate(allErrs)
}
// Error implements the error interface
func (e errConfigurationInvalid) Error() string {
	return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error())
}
Example #20
func (b *Builder) visitorResult() *Result {
	if len(b.errs) > 0 {
		return &Result{err: utilerrors.NewAggregate(b.errs)}
	}

	if b.selectAll {
		b.selector = labels.Everything()
	}

	// visit selectors
	if b.selector != nil {
		if len(b.names) != 0 {
			return &Result{err: fmt.Errorf("name cannot be provided when a selector is specified")}
		}
		if len(b.resourceTuples) != 0 {
			return &Result{err: fmt.Errorf("selectors and the all flag cannot be used when passing resource/name arguments")}
		}
		if len(b.resources) == 0 {
			return &Result{err: fmt.Errorf("at least one resource must be specified to use a selector")}
		}
		// an empty selector produces a different error message when paths are provided
		if len(b.paths) != 0 {
			if b.selector.Empty() {
				return &Result{err: fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify a resource by arguments as well")}
			} else {
				return &Result{err: fmt.Errorf("a selector may not be specified when path, URL, or stdin is provided as input")}
			}
		}
		mappings, err := b.resourceMappings()
		if err != nil {
			return &Result{err: err}
		}

		visitors := []Visitor{}
		for _, mapping := range mappings {
			client, err := b.mapper.ClientForMapping(mapping)
			if err != nil {
				return &Result{err: err}
			}
			selectorNamespace := b.namespace
			if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
				selectorNamespace = ""
			}
			visitors = append(visitors, NewSelector(client, mapping, selectorNamespace, b.selector, b.export))
		}
		if b.continueOnError {
			return &Result{visitor: EagerVisitorList(visitors), sources: visitors}
		}
		return &Result{visitor: VisitorList(visitors), sources: visitors}
	}

	// visit items specified by resource and name
	if len(b.resourceTuples) != 0 {
		// if b.singular is false, this could be by default, so double-check length
		// of resourceTuples to determine if in fact it is singular or not
		isSingular := b.singular
		if !isSingular {
			isSingular = len(b.resourceTuples) == 1
		}

		if len(b.paths) != 0 {
			return &Result{singular: isSingular, err: fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify a resource by arguments as well")}
		}
		if len(b.resources) != 0 {
			return &Result{singular: isSingular, err: fmt.Errorf("you may not specify individual resources and bulk resources in the same call")}
		}

		// retrieve one client for each resource
		mappings, err := b.resourceTupleMappings()
		if err != nil {
			return &Result{singular: isSingular, err: err}
		}
		clients := make(map[string]RESTClient)
		for _, mapping := range mappings {
			s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource)
			if _, ok := clients[s]; ok {
				continue
			}
			client, err := b.mapper.ClientForMapping(mapping)
			if err != nil {
				return &Result{err: err}
			}
			clients[s] = client
		}

		items := []Visitor{}
		for _, tuple := range b.resourceTuples {
			mapping, ok := mappings[tuple.Resource]
			if !ok {
				return &Result{singular: isSingular, err: fmt.Errorf("resource %q is not recognized: %v", tuple.Resource, mappings)}
			}
			s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource)
			client, ok := clients[s]
			if !ok {
				return &Result{singular: isSingular, err: fmt.Errorf("could not find a client for resource %q", tuple.Resource)}
			}

			selectorNamespace := b.namespace
			if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
				selectorNamespace = ""
			} else {
				if len(b.namespace) == 0 {
					return &Result{singular: isSingular, err: fmt.Errorf("namespace may not be empty when retrieving a resource by name")}
				}
			}

			info := NewInfo(client, mapping, selectorNamespace, tuple.Name, b.export)
			items = append(items, info)
		}

		var visitors Visitor
		if b.continueOnError {
			visitors = EagerVisitorList(items)
		} else {
			visitors = VisitorList(items)
		}
		return &Result{singular: isSingular, visitor: visitors, sources: items}
	}

	// visit items specified by name
	if len(b.names) != 0 {
		isSingular := len(b.names) == 1

		if len(b.paths) != 0 {
			return &Result{singular: isSingular, err: fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify a resource by arguments as well")}
		}
		if len(b.resources) == 0 {
			return &Result{singular: isSingular, err: fmt.Errorf("you must provide a resource and a resource name together")}
		}
		if len(b.resources) > 1 {
			return &Result{singular: isSingular, err: fmt.Errorf("you must specify only one resource")}
		}

		mappings, err := b.resourceMappings()
		if err != nil {
			return &Result{singular: isSingular, err: err}
		}
		mapping := mappings[0]

		client, err := b.mapper.ClientForMapping(mapping)
		if err != nil {
			return &Result{err: err}
		}

		selectorNamespace := b.namespace
		if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
			selectorNamespace = ""
		} else {
			if len(b.namespace) == 0 {
				return &Result{singular: isSingular, err: fmt.Errorf("namespace may not be empty when retrieving a resource by name")}
			}
		}

		visitors := []Visitor{}
		for _, name := range b.names {
			info := NewInfo(client, mapping, selectorNamespace, name, b.export)
			visitors = append(visitors, info)
		}
		return &Result{singular: isSingular, visitor: VisitorList(visitors), sources: visitors}
	}

	// visit items specified by paths
	if len(b.paths) != 0 {
		singular := !b.dir && !b.stream && len(b.paths) == 1
		if len(b.resources) != 0 {
			return &Result{singular: singular, err: fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify resource arguments as well")}
		}

		var visitors Visitor
		if b.continueOnError {
			visitors = EagerVisitorList(b.paths)
		} else {
			visitors = VisitorList(b.paths)
		}

		// only items from disk can be refetched
		if b.latest {
			// must flatten lists prior to fetching
			if b.flatten {
				visitors = NewFlattenListVisitor(visitors, b.mapper)
			}
			// must set namespace prior to fetching
			if b.defaultNamespace {
				visitors = NewDecoratedVisitor(visitors, SetNamespace(b.namespace))
			}
			visitors = NewDecoratedVisitor(visitors, RetrieveLatest)
		}
		return &Result{singular: singular, visitor: visitors, sources: b.paths}
	}

	if len(b.resources) != 0 {
		return &Result{err: fmt.Errorf("resource(s) were provided, but no name, label selector, or --all flag specified")}
	}
	return &Result{err: fmt.Errorf("you must provide one or more resources by argument or filename (%s)", strings.Join(InputExtensions, "|"))}
}