Example #1
// AuthenticateRequest authenticates the request using presented client certificates
func (a *Authenticator) AuthenticateRequest(req *http.Request) (user.Info, bool, error) {
	if req.TLS == nil || len(req.TLS.PeerCertificates) == 0 {
		return nil, false, nil
	}

	// Use intermediates, if provided
	optsCopy := a.opts
	if optsCopy.Intermediates == nil && len(req.TLS.PeerCertificates) > 1 {
		optsCopy.Intermediates = x509.NewCertPool()
		for _, intermediate := range req.TLS.PeerCertificates[1:] {
			optsCopy.Intermediates.AddCert(intermediate)
		}
	}

	chains, err := req.TLS.PeerCertificates[0].Verify(optsCopy)
	if err != nil {
		return nil, false, err
	}

	var errlist []error
	for _, chain := range chains {
		user, ok, err := a.user.User(chain)
		if err != nil {
			errlist = append(errlist, err)
			continue
		}

		if ok {
			return user, ok, err
		}
	}
	return nil, false, utilerrors.NewAggregate(errlist)
}
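The final return works because NewAggregate returns nil for an empty (or all-nil) error slice, so the collect-and-continue loop never needs a length check before handing its slice over. Below is a minimal, self-contained sketch of that collect-then-aggregate pattern; the checkAll helper is invented for illustration, and the import path k8s.io/apimachinery/pkg/util/errors is an assumption (the examples here only show the utilerrors alias).

package main

import (
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

// checkAll runs every check, remembers the failures, and reports them as one error.
func checkAll(checks ...func() error) error {
	var errlist []error
	for _, check := range checks {
		if err := check(); err != nil {
			errlist = append(errlist, err)
		}
	}
	// NewAggregate returns nil when errlist is empty, so success needs no special case.
	return utilerrors.NewAggregate(errlist)
}

func main() {
	err := checkAll(
		func() error { return nil },
		func() error { return fmt.Errorf("chain did not map to a user") },
	)
	fmt.Println(err)
}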
Example #2
func NewCmdRolloutUndo(f cmdutil.Factory, out io.Writer) *cobra.Command {
	options := &UndoOptions{}

	validArgs := []string{"deployment"}
	argAliases := kubectl.ResourceAliases(validArgs)

	cmd := &cobra.Command{
		Use:     "undo (TYPE NAME | TYPE/NAME) [flags]",
		Short:   "Undo a previous rollout",
		Long:    undo_long,
		Example: undo_example,
		Run: func(cmd *cobra.Command, args []string) {
			allErrs := []error{}
			err := options.CompleteUndo(f, cmd, out, args)
			if err != nil {
				allErrs = append(allErrs, err)
			}
			err = options.RunUndo()
			if err != nil {
				allErrs = append(allErrs, err)
			}
			cmdutil.CheckErr(utilerrors.Flatten(utilerrors.NewAggregate(allErrs)))
		},
		ValidArgs:  validArgs,
		ArgAliases: argAliases,
	}

	cmd.Flags().Int64("to-revision", 0, "The revision to rollback to. Default to 0 (last revision).")
	usage := "identifying the resource to get from a server."
	cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage)
	cmdutil.AddDryRunFlag(cmd)
	return cmd
}
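RunUndo (Example #24) itself returns an aggregate, so the Run callback above wraps both the Complete and Run errors in another aggregate and calls Flatten before CheckErr; Flatten expands nested aggregates into one flat list. A small sketch of that behavior, again assuming utilerrors is k8s.io/apimachinery/pkg/util/errors:

package main

import (
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	// An inner aggregate, similar to what RunUndo returns when several rollbacks fail.
	inner := utilerrors.NewAggregate([]error{
		fmt.Errorf("undoing deployment/a failed"),
		fmt.Errorf("undoing deployment/b failed"),
	})
	outer := utilerrors.NewAggregate([]error{inner, fmt.Errorf("complete step failed")})

	// Flatten walks nested aggregates and returns a single flat Aggregate,
	// so each underlying error is reported individually.
	flat := utilerrors.Flatten(outer)
	fmt.Println(len(flat.Errors())) // 3
}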
Example #3
func ensureProcessInContainerWithOOMScore(pid int, oomScoreAdj int, manager *fs.Manager) error {
	if runningInHost, err := isProcessRunningInHost(pid); err != nil {
		// Err on the side of caution. Avoid moving the docker daemon unless we are able to identify its context.
		return err
	} else if !runningInHost {
		// Process is running inside a container. Don't touch that.
		glog.V(2).Infof("pid %d is not running in the host namespaces", pid)
		return nil
	}

	var errs []error
	if manager != nil {
		cont, err := getContainer(pid)
		if err != nil {
			errs = append(errs, fmt.Errorf("failed to find container of PID %d: %v", pid, err))
		}

		if cont != manager.Cgroups.Name {
			err = manager.Apply(pid)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to move PID %d (in %q) to %q: %v", pid, cont, manager.Cgroups.Name, err))
			}
		}
	}

	// Also apply oom-score-adj to processes
	oomAdjuster := oom.NewOOMAdjuster()
	glog.V(5).Infof("attempting to apply oom_score_adj of %d to pid %d", oomScoreAdj, pid)
	if err := oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err != nil {
		glog.V(3).Infof("Failed to apply oom_score_adj %d for pid %d: %v", oomScoreAdj, pid, err)
		errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d: %v", oomScoreAdj, pid, err))
	}
	return utilerrors.NewAggregate(errs)
}
Example #4
func validateArguments(cmd *cobra.Command, filenames, args []string) error {
	deploymentKey := cmdutil.GetFlagString(cmd, "deployment-label-key")
	image := cmdutil.GetFlagString(cmd, "image")
	rollback := cmdutil.GetFlagBool(cmd, "rollback")

	errors := []error{}
	if len(deploymentKey) == 0 {
		errors = append(errors, cmdutil.UsageError(cmd, "--deployment-label-key can not be empty"))
	}
	if len(filenames) > 1 {
		errors = append(errors, cmdutil.UsageError(cmd, "May only specify a single filename for new controller"))
	}

	if !rollback {
		if len(filenames) == 0 && len(image) == 0 {
			errors = append(errors, cmdutil.UsageError(cmd, "Must specify --filename or --image for new controller"))
		} else if len(filenames) != 0 && len(image) != 0 {
			errors = append(errors, cmdutil.UsageError(cmd, "--filename and --image can not both be specified"))
		}
	} else {
		if len(filenames) != 0 || len(image) != 0 {
			errors = append(errors, cmdutil.UsageError(cmd, "Don't specify --filename or --image on rollback"))
		}
	}

	if len(args) < 1 {
		errors = append(errors, cmdutil.UsageError(cmd, "Must specify the controller to update"))
	}

	return utilerrors.NewAggregate(errors)
}
Example #5
// RESTMapping provides the REST mapping for the resource based on the
// kind and version. This implementation supports multiple REST schemas and
// returns the first match.
func (m MultiRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) {
	allMappings := []*RESTMapping{}
	errors := []error{}

	for _, t := range m {
		currMapping, err := t.RESTMapping(gk, versions...)
		// ignore "no match" errors, but any other error percolates back up
		if IsNoMatchError(err) {
			continue
		}
		if err != nil {
			errors = append(errors, err)
			continue
		}

		allMappings = append(allMappings, currMapping)
	}

	// if we got exactly one mapping, then use it even if other requests failed
	if len(allMappings) == 1 {
		return allMappings[0], nil
	}
	if len(allMappings) > 1 {
		var kinds []schema.GroupVersionKind
		for _, m := range allMappings {
			kinds = append(kinds, m.GroupVersionKind)
		}
		return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds}
	}
	if len(errors) > 0 {
		return nil, utilerrors.NewAggregate(errors)
	}
	return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")}
}
Example #6
func TestStatefulSetControllerRespectsOrder(t *testing.T) {
	psc, fc := newFakeStatefulSetController()
	replicas := 4
	ps := newStatefulSet(replicas)

	saturateStatefulSet(t, ps, psc, fc)

	errs := []error{}
	*(ps.Spec.Replicas) = 0
	// Shuffle known list and check that pets are deleted in reverse
	knownPods := fc.getPodList()
	for i := range knownPods {
		j := rand.Intn(i + 1)
		knownPods[i], knownPods[j] = knownPods[j], knownPods[i]
	}

	for i := 0; i < replicas; i++ {
		if len(fc.pets) != replicas-i {
			t.Errorf("Unexpected number of pods, expected %d found %d", i, len(fc.pets))
		}
		if _, syncErr := psc.syncStatefulSet(ps, knownPods); syncErr != nil {
			errs = append(errs, syncErr)
		}
		checkPets(ps, replicas, i+1, fc, t)
	}
	if len(errs) != 0 {
		t.Errorf("Error syncing StatefulSet: %v", errors.NewAggregate(errs))
	}
}
Example #7
// reorganizeTaints returns the updated set of taints, taking into account old taints that were not updated,
// old taints that were updated, old taints that were deleted, and new taints.
func reorganizeTaints(accessor metav1.Object, overwrite bool, taintsToAdd []v1.Taint, taintsToRemove []v1.Taint) ([]v1.Taint, error) {
	newTaints := append([]v1.Taint{}, taintsToAdd...)

	var oldTaints []v1.Taint
	var err error
	annotations := accessor.GetAnnotations()
	if annotations != nil {
		if oldTaints, err = v1.GetTaintsFromNodeAnnotations(annotations); err != nil {
			return nil, err
		}
	}

	// add taints that already exist but were not updated to newTaints
	for _, oldTaint := range oldTaints {
		existsInNew := false
		for _, taint := range newTaints {
			if taint.MatchTaint(oldTaint) {
				existsInNew = true
				break
			}
		}
		if !existsInNew {
			newTaints = append(newTaints, oldTaint)
		}
	}

	allErrs := []error{}
	for _, taintToRemove := range taintsToRemove {
		newTaints, err = deleteTaint(newTaints, taintToRemove)
		if err != nil {
			allErrs = append(allErrs, err)
		}
	}
	return newTaints, utilerrors.NewAggregate(allErrs)
}
Example #8
func (b *Builder) visitorResult() *Result {
	if len(b.errs) > 0 {
		return &Result{err: utilerrors.NewAggregate(b.errs)}
	}

	if b.selectAll {
		b.selector = labels.Everything()
	}

	// visit items specified by paths
	if len(b.paths) != 0 {
		return b.visitByPaths()
	}

	// visit selectors
	if b.selector != nil {
		return b.visitBySelector()
	}

	// visit items specified by resource and name
	if len(b.resourceTuples) != 0 {
		return b.visitByResource()
	}

	// visit items specified by name
	if len(b.names) != 0 {
		return b.visitByName()
	}

	if len(b.resources) != 0 {
		return &Result{err: fmt.Errorf("resource(s) were provided, but no name, label selector, or --all flag specified")}
	}
	return &Result{err: missingResourceError}
}
Example #9
func (o ResumeConfig) RunResume() error {
	allErrs := []error{}
	for _, patch := range set.CalculatePatches(o.Infos, o.Encoder, o.Resumer) {
		info := patch.Info

		if patch.Err != nil {
			allErrs = append(allErrs, fmt.Errorf("error: %s %q %v", info.Mapping.Resource, info.Name, patch.Err))
			continue
		}

		if string(patch.Patch) == "{}" || len(patch.Patch) == 0 {
			cmdutil.PrintSuccess(o.Mapper, false, o.Out, info.Mapping.Resource, info.Name, false, "already resumed")
			continue
		}

		obj, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch)
		if err != nil {
			allErrs = append(allErrs, fmt.Errorf("failed to patch: %v", err))
			continue
		}

		info.Refresh(obj, true)
		cmdutil.PrintSuccess(o.Mapper, false, o.Out, info.Mapping.Resource, info.Name, false, "resumed")
	}

	return utilerrors.NewAggregate(allErrs)
}
Example #10
func NewCmdRolloutResume(f cmdutil.Factory, out io.Writer) *cobra.Command {
	options := &ResumeConfig{}

	validArgs := []string{"deployment"}
	argAliases := kubectl.ResourceAliases(validArgs)

	cmd := &cobra.Command{
		Use:     "resume RESOURCE",
		Short:   "Resume a paused resource",
		Long:    resume_long,
		Example: resume_example,
		Run: func(cmd *cobra.Command, args []string) {
			allErrs := []error{}
			err := options.CompleteResume(f, cmd, out, args)
			if err != nil {
				allErrs = append(allErrs, err)
			}
			err = options.RunResume()
			if err != nil {
				allErrs = append(allErrs, err)
			}
			cmdutil.CheckErr(utilerrors.Flatten(utilerrors.NewAggregate(allErrs)))
		},
		ValidArgs:  validArgs,
		ArgAliases: argAliases,
	}

	usage := "identifying the resource to get from a server."
	cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage)
	return cmd
}
Example #11
// Scan through the whole cgroup directory and kill all processes either
// attached to the pod cgroup or to a container cgroup under the pod cgroup
func (m *podContainerManagerImpl) tryKillingCgroupProcesses(podCgroup CgroupName) error {
	pidsToKill := m.cgroupManager.Pids(podCgroup)
	// No pids charged to the terminated pod cgroup; return early
	if len(pidsToKill) == 0 {
		return nil
	}

	var errlist []error
	// os.Kill often errors out, so we try killing all the pids multiple times
	for i := 0; i < 5; i++ {
		if i != 0 {
			glog.V(3).Infof("Attempt %v failed to kill all unwanted process. Retyring", i)
		}
		errlist = []error{}
		for _, pid := range pidsToKill {
			p, err := os.FindProcess(pid)
			if err != nil {
				// Process not running anymore, do nothing
				continue
			}
			glog.V(3).Infof("Attempt to kill process with pid: %v", pid)
			if err := p.Kill(); err != nil {
				glog.V(3).Infof("failed to kill process with pid: %v", pid)
				errlist = append(errlist, err)
			}
		}
		if len(errlist) == 0 {
			glog.V(3).Infof("successfully killed all unwanted processes.")
			return nil
		}
	}
	return utilerrors.NewAggregate(errlist)
}
Example #12
func (sysver SystemVerificationCheck) Check() (warnings, errors []error) {
	// Create a buffered writer with a fairly large buffer (1M), assuming the output from the system verification test won't exceed that limit
	// Run the system verification check, but write to our buffered writer instead of stdout
	bufw := bufio.NewWriterSize(os.Stdout, 1*1024*1024)
	reporter := &system.StreamReporter{WriteStream: bufw}

	var errs []error
	// All the validators we'd like to run:
	var validators = []system.Validator{
		&system.OSValidator{Reporter: reporter},
		&system.KernelValidator{Reporter: reporter},
		&system.CgroupsValidator{Reporter: reporter},
		&system.DockerValidator{Reporter: reporter},
	}

	// Run all validators
	for _, v := range validators {
		errs = append(errs, v.Validate(system.DefaultSysSpec))
	}

	err := utilerrors.NewAggregate(errs)
	if err != nil {
		// Only print the output from the system verification check if the check failed
		fmt.Println("[preflight] The system verification failed. Printing the output from the verification:")
		bufw.Flush()
		return nil, []error{err}
	}
	return nil, nil
}
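The validator loop above appends the result of every Validate call, including nil results from validators that pass. That is safe because NewAggregate filters out nil entries and returns nil when nothing is left. A short sketch of those semantics (import path assumed as before):

package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	// nil entries are dropped; an empty or all-nil slice aggregates to nil.
	fmt.Println(utilerrors.NewAggregate([]error{nil, nil}) == nil) // true

	// A single real error survives and produces a non-nil aggregate.
	agg := utilerrors.NewAggregate([]error{nil, errors.New("docker validation failed")})
	fmt.Println(agg != nil, len(agg.Errors())) // true 1
}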
Example #13
// cleanupDeployment is responsible for cleaning up a deployment, i.e. it retains all but the latest N old replica sets,
// where N=d.Spec.RevisionHistoryLimit. Old replica sets are older versions of the pod template of a deployment, kept
// around by default 1) for historical reasons and 2) for the ability to roll back a deployment.
func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) error {
	if deployment.Spec.RevisionHistoryLimit == nil {
		return nil
	}
	diff := int32(len(oldRSs)) - *deployment.Spec.RevisionHistoryLimit
	if diff <= 0 {
		return nil
	}

	sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))

	var errList []error
	// TODO: This should be parallelized.
	for i := int32(0); i < diff; i++ {
		rs := oldRSs[i]
		// Avoid deleting replica sets with non-zero replica counts
		if rs.Status.Replicas != 0 || *(rs.Spec.Replicas) != 0 || rs.Generation > rs.Status.ObservedGeneration {
			continue
		}
		if err := dc.client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
			glog.V(2).Infof("Failed deleting old replica set %v for deployment %v: %v", rs.Name, deployment.Name, err)
			errList = append(errList, err)
		}
	}

	return utilerrors.NewAggregate(errList)
}
Example #14
// InitPlugins initializes each plugin.  All plugins must have unique names.
// This must be called exactly once before any New* methods are called on any
// plugins.
func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, host VolumeHost) error {
	pm.mutex.Lock()
	defer pm.mutex.Unlock()

	if pm.plugins == nil {
		pm.plugins = map[string]VolumePlugin{}
	}

	allErrs := []error{}
	for _, plugin := range plugins {
		name := plugin.GetPluginName()
		if errs := validation.IsQualifiedName(name); len(errs) != 0 {
			allErrs = append(allErrs, fmt.Errorf("volume plugin has invalid name: %q: %s", name, strings.Join(errs, ";")))
			continue
		}

		if _, found := pm.plugins[name]; found {
			allErrs = append(allErrs, fmt.Errorf("volume plugin %q was registered more than once", name))
			continue
		}
		err := plugin.Init(host)
		if err != nil {
			glog.Errorf("Failed to load volume plugin %s, error: %s", plugin, err.Error())
			allErrs = append(allErrs, err)
			continue
		}
		pm.plugins[name] = plugin
		glog.V(1).Infof("Loaded volume plugin %q", name)
	}
	return utilerrors.NewAggregate(allErrs)
}
Example #15
func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
	deployments := reaper.dClient.Deployments(namespace)
	replicaSets := reaper.rsClient.ReplicaSets(namespace)
	rsReaper := &ReplicaSetReaper{reaper.rsClient, reaper.pollInterval, reaper.timeout}

	deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d *extensions.Deployment) {
		// set deployment's history and scale to 0
		// TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527
		d.Spec.RevisionHistoryLimit = util.Int32Ptr(0)
		d.Spec.Replicas = 0
		d.Spec.Paused = true
	})
	if err != nil {
		return err
	}

	// Use observedGeneration to determine if the deployment controller noticed the pause.
	if err := deploymentutil.WaitForObservedDeploymentInternal(func() (*extensions.Deployment, error) {
		return deployments.Get(name, metav1.GetOptions{})
	}, deployment.Generation, 1*time.Second, 1*time.Minute); err != nil {
		return err
	}

	// Do not cascade deletion for overlapping deployments.
	if len(deployment.Annotations[deploymentutil.OverlapAnnotation]) > 0 {
		return deployments.Delete(name, nil)
	}

	// Stop all replica sets.
	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		return err
	}

	options := api.ListOptions{LabelSelector: selector}
	rsList, err := replicaSets.List(options)
	if err != nil {
		return err
	}
	errList := []error{}
	for _, rc := range rsList.Items {
		if err := rsReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil {
			scaleGetErr, ok := err.(ScaleError)
			if errors.IsNotFound(err) || (ok && errors.IsNotFound(scaleGetErr.ActualError)) {
				continue
			}
			errList = append(errList, err)
		}
	}
	if len(errList) > 0 {
		return utilerrors.NewAggregate(errList)
	}

	// Delete deployment at the end.
	// Note: We delete deployment at the end so that if removing RSs fails, we at least have the deployment to retry.
	var falseVar = false
	nonOrphanOption := api.DeleteOptions{OrphanDependents: &falseVar}
	return deployments.Delete(name, &nonOrphanOption)
}
Example #16
func validateNoOverwrites(accessor metav1.Object, labels map[string]string) error {
	allErrs := []error{}
	for key := range labels {
		if value, found := accessor.GetLabels()[key]; found {
			allErrs = append(allErrs, fmt.Errorf("'%s' already has a value (%s), and --overwrite is false", key, value))
		}
	}
	return utilerrors.NewAggregate(allErrs)
}
Example #17
func (c configValidationTest) testAuthInfo(authInfoName string, t *testing.T) {
	errs := validateAuthInfo(authInfoName, *c.config.AuthInfos[authInfoName])

	if len(c.expectedErrorSubstring) != 0 {
		if len(errs) == 0 {
			t.Errorf("Expected error containing: %v", c.expectedErrorSubstring)
		}
		for _, curr := range c.expectedErrorSubstring {
			if len(errs) != 0 && !strings.Contains(utilerrors.NewAggregate(errs).Error(), curr) {
				t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, utilerrors.NewAggregate(errs))
			}
		}

	} else {
		if len(errs) != 0 {
			t.Errorf("Unexpected error: %v", utilerrors.NewAggregate(errs))
		}
	}
}
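The substring check above works because the aggregate's Error() string incorporates every underlying message (in older releases roughly as "[err1, err2]"; newer releases also de-duplicate repeated messages). A brief sketch of matching against that combined message, with the same import-path assumption:

package main

import (
	"fmt"
	"strings"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	agg := utilerrors.NewAggregate([]error{
		fmt.Errorf("no server found for cluster %q", "missing"),
		fmt.Errorf("unable to read certificate-authority %q", "missing-ca"),
	})
	// Each underlying message appears somewhere in the combined Error() string.
	fmt.Println(strings.Contains(agg.Error(), "no server found"))       // true
	fmt.Println(strings.Contains(agg.Error(), "certificate-authority")) // true
}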
Example #18
func (c ConjunctiveSchema) ValidateBytes(data []byte) error {
	var list []error
	schemas := []Schema(c)
	for ix := range schemas {
		if err := schemas[ix].ValidateBytes(data); err != nil {
			list = append(list, err)
		}
	}
	return utilerrors.NewAggregate(list)
}
Example #19
func (NoDoubleKeySchema) ValidateBytes(data []byte) error {
	var list []error
	if err := validateNoDuplicateKeys(data, "metadata", "labels"); err != nil {
		list = append(list, err)
	}
	if err := validateNoDuplicateKeys(data, "metadata", "annotations"); err != nil {
		list = append(list, err)
	}
	return utilerrors.NewAggregate(list)
}
Example #20
func validateFlags(cmd *cobra.Command) error {
	errs := []error{}
	max, min := cmdutil.GetFlagInt(cmd, "max"), cmdutil.GetFlagInt(cmd, "min")
	if max < 1 {
		errs = append(errs, fmt.Errorf("--max=MAXPODS is required and must be at least 1, max: %d", max))
	}
	if max < min {
		errs = append(errs, fmt.Errorf("--max=MAXPODS must be larger or equal to --min=MINPODS, max: %d, min: %d", max, min))
	}
	return utilerrors.NewAggregate(errs)
}
Example #21
// NewForbidden is a utility function to return a well-formatted admission control error response
func NewForbidden(a Attributes, internalError error) error {
	// do not double-wrap an error of the same type
	if apierrors.IsForbidden(internalError) {
		return internalError
	}
	name, resource, err := extractResourceName(a)
	if err != nil {
		return apierrors.NewInternalError(utilerrors.NewAggregate([]error{internalError, err}))
	}
	return apierrors.NewForbidden(resource, name, internalError)
}
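Here the aggregate serves a slightly different purpose: it bundles exactly two known errors (the original admission error and the name-extraction error) so both survive inside the wrapped internal error. A minimal sketch of that two-error bundle, leaving out the apierrors wrapping and assuming the usual utilerrors import path:

package main

import (
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	internalError := fmt.Errorf("admission plugin rejected the request")
	extractErr := fmt.Errorf("could not extract resource name from attributes")

	combined := utilerrors.NewAggregate([]error{internalError, extractErr})
	// Both underlying errors remain individually accessible via Errors().
	for _, e := range combined.Errors() {
		fmt.Println(e)
	}
}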
Example #22
func (k *KernelValidator) Validate(spec SysSpec) error {
	release, err := exec.Command("uname", "-r").CombinedOutput()
	if err != nil {
		return fmt.Errorf("failed to get kernel release: %v", err)
	}
	k.kernelRelease = strings.TrimSpace(string(release))
	var errs []error
	errs = append(errs, k.validateKernelVersion(spec.KernelSpec))
	errs = append(errs, k.validateKernelConfig(spec.KernelSpec))
	return errors.NewAggregate(errs)
}
Example #23
// ValidateParams ensures that all required params are present in the params map
func ValidateParams(paramSpec []GeneratorParam, params map[string]interface{}) error {
	allErrs := []error{}
	for ix := range paramSpec {
		if paramSpec[ix].Required {
			value, found := params[paramSpec[ix].Name]
			if !found || IsZero(value) {
				allErrs = append(allErrs, fmt.Errorf("Parameter: %s is required", paramSpec[ix].Name))
			}
		}
	}
	return utilerrors.NewAggregate(allErrs)
}
Example #24
func (o *UndoOptions) RunUndo() error {
	allErrs := []error{}
	for ix, info := range o.Infos {
		result, err := o.Rollbackers[ix].Rollback(info.Object, nil, o.ToRevision, o.DryRun)
		if err != nil {
			allErrs = append(allErrs, cmdutil.AddSourceToErr("undoing", info.Source, err))
			continue
		}
		cmdutil.PrintSuccess(o.Mapper, false, o.Out, info.Mapping.Resource, info.Name, false, result)
	}
	return utilerrors.NewAggregate(allErrs)
}
Example #25
// InstallREST registers the REST handlers (storage, watch, proxy and redirect) into a restful Container.
// It is expected that the provided path root prefix will serve all operations. Root MUST NOT end
// in a slash.
func (g *APIGroupVersion) InstallREST(container *restful.Container) error {
	installer := g.newInstaller()
	ws := installer.NewWebService()
	apiResources, registrationErrors := installer.Install(ws)
	lister := g.ResourceLister
	if lister == nil {
		lister = staticLister{apiResources}
	}
	AddSupportedResourcesWebService(g.Serializer, ws, g.GroupVersion, lister)
	container.Add(ws)
	return utilerrors.NewAggregate(registrationErrors)
}
Example #26
func (o *ImageOptions) Validate() error {
	errors := []error{}
	if len(o.Resources) < 1 && cmdutil.IsFilenameEmpty(o.Filenames) {
		errors = append(errors, fmt.Errorf("one or more resources must be specified as <resource> <name> or <resource>/<name>"))
	}
	if len(o.ContainerImages) < 1 {
		errors = append(errors, fmt.Errorf("at least one image update is required"))
	} else if len(o.ContainerImages) > 1 && hasWildcardKey(o.ContainerImages) {
		errors = append(errors, fmt.Errorf("all containers are already specified by *, but saw more than one container_name=container_image pairs"))
	}
	return utilerrors.NewAggregate(errors)
}
Example #27
// deletePods will delete all pods from the master that are running on the given node, and return true
// if any pods were deleted, or were found pending deletion.
func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore cache.StoreToDaemonSetLister) (bool, error) {
	remaining := false
	selector := fields.OneTermEqualSelector(api.PodHostField, nodeName).String()
	options := v1.ListOptions{FieldSelector: selector}
	pods, err := kubeClient.Core().Pods(v1.NamespaceAll).List(options)
	var updateErrList []error

	if err != nil {
		return remaining, err
	}

	if len(pods.Items) > 0 {
		recordNodeEvent(recorder, nodeName, nodeUID, v1.EventTypeNormal, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName))
	}

	for _, pod := range pods.Items {
		// Defensive check, also needed for tests.
		if pod.Spec.NodeName != nodeName {
			continue
		}

		// Set reason and message in the pod object.
		if _, err = setPodTerminationReason(kubeClient, &pod, nodeName); err != nil {
			if errors.IsConflict(err) {
				updateErrList = append(updateErrList,
					fmt.Errorf("update status failed for pod %q: %v", format.Pod(&pod), err))
				continue
			}
		}
		// if the pod has already been marked for deletion, we still return true that there are remaining pods.
		if pod.DeletionGracePeriodSeconds != nil {
			remaining = true
			continue
		}
		// if the pod is managed by a daemonset, ignore it
		_, err := daemonStore.GetPodDaemonSets(&pod)
		if err == nil { // No error means at least one daemonset was found
			continue
		}

		glog.V(2).Infof("Starting deletion of pod %v", pod.Name)
		recorder.Eventf(&pod, v1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
		if err := kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
			return false, err
		}
		remaining = true
	}

	if len(updateErrList) > 0 {
		return false, utilerrors.NewAggregate(updateErrList)
	}
	return remaining, nil
}
Example #28
// Validate checks that the provided attach options are specified.
func (p *AttachOptions) Validate() error {
	allErrs := []error{}
	if len(p.PodName) == 0 {
		allErrs = append(allErrs, fmt.Errorf("pod name must be specified"))
	}
	if p.Out == nil || p.Err == nil {
		allErrs = append(allErrs, fmt.Errorf("both output and error output must be provided"))
	}
	if p.Attach == nil || p.PodClient == nil || p.Config == nil {
		allErrs = append(allErrs, fmt.Errorf("client, client config, and attach must be provided"))
	}
	return utilerrors.NewAggregate(allErrs)
}
Example #29
// EncodeList ensures that each object in an array is converted to a Unknown{} in serialized form.
// TODO: accept a content type.
func EncodeList(e Encoder, objects []Object) error {
	var errs []error
	for i := range objects {
		data, err := Encode(e, objects[i])
		if err != nil {
			errs = append(errs, err)
			continue
		}
		// TODO: Set ContentEncoding and ContentType.
		objects[i] = &Unknown{Raw: data}
	}
	return errors.NewAggregate(errs)
}
Example #30
// ToAggregate converts the ErrorList into an errors.Aggregate.
func (list ErrorList) ToAggregate() utilerrors.Aggregate {
	errs := make([]error, 0, len(list))
	errorMsgs := sets.NewString()
	for _, err := range list {
		msg := fmt.Sprintf("%v", err)
		if errorMsgs.Has(msg) {
			continue
		}
		errorMsgs.Insert(msg)
		errs = append(errs, err)
	}
	return utilerrors.NewAggregate(errs)
}
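As the implementation shows, ToAggregate drops entries whose messages repeat before building the aggregate. A short usage sketch; the package path k8s.io/apimachinery/pkg/util/validation/field is an assumption about where this ErrorList lives:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	path := field.NewPath("spec", "replicas")
	list := field.ErrorList{
		field.Required(path, ""),
		field.Required(path, ""), // same message as the first entry
	}
	// Duplicate messages collapse, so only one error remains in the aggregate.
	fmt.Println(len(list.ToAggregate().Errors())) // 1
}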