func (s *SwaggerSchema) ValidateBytes(data []byte) error {
    var obj interface{}
    out, err := yaml.ToJSON(data)
    if err != nil {
        return err
    }
    data = out
    if err := json.Unmarshal(data, &obj); err != nil {
        return err
    }
    fields, ok := obj.(map[string]interface{})
    if !ok {
        return fmt.Errorf("error in unmarshaling data %s", string(data))
    }
    apiVersion := fields["apiVersion"]
    if apiVersion == nil {
        return fmt.Errorf("apiVersion not set")
    }
    kind := fields["kind"]
    if kind == nil {
        return fmt.Errorf("kind not set")
    }
    allErrs := s.ValidateObject(obj, apiVersion.(string), "", apiVersion.(string)+"."+kind.(string))
    if len(allErrs) == 1 {
        return allErrs[0]
    }
    return errors.NewAggregate(allErrs)
}
// InitPlugins initializes each plugin. All plugins must have unique names.
// This must be called exactly once before any New* methods are called on any
// plugins.
func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, host VolumeHost) error {
    pm.mutex.Lock()
    defer pm.mutex.Unlock()

    if pm.plugins == nil {
        pm.plugins = map[string]VolumePlugin{}
    }

    allErrs := []error{}
    for _, plugin := range plugins {
        name := plugin.Name()
        if !validation.IsQualifiedName(name) {
            allErrs = append(allErrs, fmt.Errorf("volume plugin has invalid name: %#v", plugin))
            continue
        }

        if _, found := pm.plugins[name]; found {
            allErrs = append(allErrs, fmt.Errorf("volume plugin %q was registered more than once", name))
            continue
        }
        plugin.Init(host)
        pm.plugins[name] = plugin
        glog.V(1).Infof("Loaded volume plugin %q", name)
    }
    return errors.NewAggregate(allErrs)
}
// AuthenticateRequest authenticates the request using presented client certificates
func (a *Authenticator) AuthenticateRequest(req *http.Request) (user.Info, bool, error) {
    if req.TLS == nil {
        return nil, false, nil
    }

    var errlist []error
    for _, cert := range req.TLS.PeerCertificates {
        chains, err := cert.Verify(a.opts)
        if err != nil {
            errlist = append(errlist, err)
            continue
        }

        for _, chain := range chains {
            user, ok, err := a.user.User(chain)
            if err != nil {
                errlist = append(errlist, err)
                continue
            }

            if ok {
                return user, ok, err
            }
        }
    }
    return nil, false, errors.NewAggregate(errlist)
}
func statusCausesToAggrError(scs []unversioned.StatusCause) utilerrors.Aggregate {
    errs := make([]error, len(scs))
    for i, sc := range scs {
        errs[i] = fmt.Errorf("%s: %s", sc.Field, sc.Message)
    }
    return utilerrors.NewAggregate(errs)
}
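// For reference, a minimal self-contained sketch of the aggregation behavior the
// helpers in this file rely on: NewAggregate folds a slice of errors into one
// error and returns nil for an empty slice. The import path below assumes the
// current apimachinery location of util/errors; older trees vendor it as
// "k8s.io/kubernetes/pkg/util/errors".
package main

import (
    "fmt"

    utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
    errs := []error{
        fmt.Errorf("spec.replicas: must be non-negative"),
        fmt.Errorf("metadata.name: required"),
    }
    agg := utilerrors.NewAggregate(errs)
    fmt.Println(agg)          // one error combining both messages
    fmt.Println(agg.Errors()) // the underlying errors stay accessible

    // An empty slice aggregates to nil, so "no causes" means "no error".
    fmt.Println(utilerrors.NewAggregate(nil) == nil) // true
}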
func (v FlattenListVisitor) Visit(fn VisitorFunc) error {
    return v.Visitor.Visit(func(info *Info, err error) error {
        if err != nil {
            return err
        }
        if info.Object == nil {
            return fn(info, nil)
        }
        items, err := runtime.ExtractList(info.Object)
        if err != nil {
            return fn(info, nil)
        }
        if errs := runtime.DecodeList(items, struct {
            runtime.ObjectTyper
            runtime.Decoder
        }{v.Mapper, info.Mapping.Codec}); len(errs) > 0 {
            return utilerrors.NewAggregate(errs)
        }
        for i := range items {
            item, err := v.InfoForObject(items[i])
            if err != nil {
                return err
            }
            if len(info.ResourceVersion) != 0 {
                item.ResourceVersion = info.ResourceVersion
            }
            if err := fn(item, nil); err != nil {
                return err
            }
        }
        return nil
    })
}
// setupKernelTunables validates kernel tunable flags are set as expected.
// Depending upon the specified option, it will either warn, error, or modify the kernel tunable flags.
func setupKernelTunables(option KernelTunableBehavior) error {
    desiredState := map[string]int{
        utilsysctl.VmOvercommitMemory: utilsysctl.VmOvercommitMemoryAlways,
        utilsysctl.VmPanicOnOOM:       utilsysctl.VmPanicOnOOMInvokeOOMKiller,
    }

    errList := []error{}
    for flag, expectedValue := range desiredState {
        val, err := utilsysctl.GetSysctl(flag)
        if err != nil {
            errList = append(errList, err)
            continue
        }
        if val == expectedValue {
            continue
        }

        switch option {
        case KernelTunableError:
            errList = append(errList, fmt.Errorf("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val))
        case KernelTunableWarn:
            glog.V(2).Infof("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
        case KernelTunableModify:
            glog.V(2).Infof("Updating kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
            err = utilsysctl.SetSysctl(flag, expectedValue)
            if err != nil {
                errList = append(errList, err)
            }
        }
    }
    return errors.NewAggregate(errList)
}
func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) {
    names := sets.String{}
    for i, pod := range pods {
        var errlist []error
        if errs := validation.ValidatePod(pod); len(errs) != 0 {
            errlist = append(errlist, errs...)
            // If validation fails, don't trust it any further -
            // even Name could be bad.
        } else {
            name := kubecontainer.GetPodFullName(pod)
            if names.Has(name) {
                errlist = append(errlist, fielderrors.NewFieldDuplicate("name", pod.Name))
            } else {
                names.Insert(name)
            }
        }
        if len(errlist) > 0 {
            name := bestPodIdentString(pod)
            err := utilerrors.NewAggregate(errlist)
            glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
            recorder.Eventf(pod, "FailedValidation", "Error validating pod %s from %s, ignoring: %v", name, source, err)
            continue
        }
        filtered = append(filtered, pod)
    }
    return
}
// PodLimitFunc enforces resource requirements enumerated by the pod against
// the specified LimitRange. The pod may be modified to apply default resource
// requirements if not specified, and enumerated on the LimitRange
func PodLimitFunc(limitRange *api.LimitRange, pod *api.Pod) error {
    var errs []error

    defaultResources := defaultContainerResourceRequirements(limitRange)
    mergePodResourceRequirements(pod, &defaultResources)

    for i := range limitRange.Spec.Limits {
        limit := limitRange.Spec.Limits[i]
        limitType := limit.Type
        // enforce container limits
        if limitType == api.LimitTypeContainer {
            for j := range pod.Spec.Containers {
                container := &pod.Spec.Containers[j]
                for k, v := range limit.Min {
                    if err := minConstraint(limitType, k, v, container.Resources.Requests, container.Resources.Limits); err != nil {
                        errs = append(errs, err)
                    }
                }
                for k, v := range limit.Max {
                    if err := maxConstraint(limitType, k, v, container.Resources.Requests, container.Resources.Limits); err != nil {
                        errs = append(errs, err)
                    }
                }
                for k, v := range limit.MaxLimitRequestRatio {
                    if err := limitRequestRatioConstraint(limitType, k, v, container.Resources.Requests, container.Resources.Limits); err != nil {
                        errs = append(errs, err)
                    }
                }
            }
        }
        // enforce pod limits
        if limitType == api.LimitTypePod {
            containerRequests, containerLimits := []api.ResourceList{}, []api.ResourceList{}
            for j := range pod.Spec.Containers {
                container := &pod.Spec.Containers[j]
                containerRequests = append(containerRequests, container.Resources.Requests)
                containerLimits = append(containerLimits, container.Resources.Limits)
            }
            podRequests := sum(containerRequests)
            podLimits := sum(containerLimits)
            for k, v := range limit.Min {
                if err := minConstraint(limitType, k, v, podRequests, podLimits); err != nil {
                    errs = append(errs, err)
                }
            }
            for k, v := range limit.Max {
                if err := maxConstraint(limitType, k, v, podRequests, podLimits); err != nil {
                    errs = append(errs, err)
                }
            }
            for k, v := range limit.MaxLimitRequestRatio {
                if err := limitRequestRatioConstraint(limitType, k, v, podRequests, podLimits); err != nil {
                    errs = append(errs, err)
                }
            }
        }
    }
    return errors.NewAggregate(errs)
}
// InstallREST registers the REST handlers (storage, watch, proxy and redirect) into a restful Container.
// It is expected that the provided path root prefix will serve all operations. Root MUST NOT end
// in a slash.
func (g *APIGroupVersion) InstallREST(container *restful.Container) error {
    installer := g.newInstaller()
    ws := installer.NewWebService()
    apiResources, registrationErrors := installer.Install(ws)
    // TODO: g.Version only contains "version" now, it will contain "group/version" in the near future.
    AddSupportedResourcesWebService(ws, g.Version, apiResources)
    container.Add(ws)
    return errors.NewAggregate(registrationErrors)
}
func validateNoOverwrites(meta *api.ObjectMeta, labels map[string]string) error {
    allErrs := []error{}
    for key := range labels {
        if value, found := meta.Labels[key]; found {
            allErrs = append(allErrs, fmt.Errorf("'%s' already has a value (%s), and --overwrite is false", key, value))
        }
    }
    return utilerrors.NewAggregate(allErrs)
}
// Filter removes items from the ValidationErrorList that match the provided fns.
func (list ValidationErrorList) Filter(fns ...errors.Matcher) ValidationErrorList {
    err := errors.FilterOut(errors.NewAggregate(list), fns...)
    if err == nil {
        return nil
    }
    // FilterOut that takes an Aggregate returns an Aggregate
    agg := err.(errors.Aggregate)
    return ValidationErrorList(agg.Errors())
}
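// Filter above delegates to errors.FilterOut, which walks an aggregate and drops
// every error matched by one of the supplied Matcher functions (a func(error) bool),
// returning a pruned aggregate or nil. A minimal sketch of that behavior, again
// assuming the apimachinery import path:
package main

import (
    "errors"
    "fmt"

    utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

var errIgnorable = errors.New("ignorable")

func main() {
    agg := utilerrors.NewAggregate([]error{errIgnorable, errors.New("fatal")})

    // Drop every matched error; whatever survives is returned.
    remaining := utilerrors.FilterOut(agg, func(err error) bool {
        return err == errIgnorable
    })
    fmt.Println(remaining) // only "fatal" survives
}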
func (c configValidationTest) testCluster(clusterName string, t *testing.T) {
    errs := validateClusterInfo(clusterName, *c.config.Clusters[clusterName])

    if len(c.expectedErrorSubstring) != 0 {
        if len(errs) == 0 {
            t.Errorf("Expected error containing: %v", c.expectedErrorSubstring)
        }
        for _, curr := range c.expectedErrorSubstring {
            if len(errs) != 0 && !strings.Contains(utilerrors.NewAggregate(errs).Error(), curr) {
                t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, utilerrors.NewAggregate(errs))
            }
        }
    } else {
        if len(errs) != 0 {
            t.Errorf("Unexpected error: %v", utilerrors.NewAggregate(errs))
        }
    }
}
func (c configValidationTest) testAuthInfo(authInfoName string, t *testing.T) {
    errs := validateAuthInfo(authInfoName, *c.config.AuthInfos[authInfoName])

    if len(c.expectedErrorSubstring) != 0 {
        if len(errs) == 0 {
            t.Errorf("Expected error containing: %v", c.expectedErrorSubstring)
        }
        for _, curr := range c.expectedErrorSubstring {
            if len(errs) != 0 && !strings.Contains(errors.NewAggregate(errs).Error(), curr) {
                t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, errors.NewAggregate(errs))
            }
        }
    } else {
        if len(errs) != 0 {
            t.Errorf("Unexpected error: %v", errors.NewAggregate(errs))
        }
    }
}
func validateFlags(cmd *cobra.Command) error {
    errs := []error{}
    max, min, cpu := cmdutil.GetFlagInt(cmd, "max"), cmdutil.GetFlagInt(cmd, "min"), cmdutil.GetFlagInt(cmd, "cpu-percent")
    if max < 1 || max < min {
        errs = append(errs, fmt.Errorf("--max=MAXPODS is required, and must be at least 1 and at least --min=MINPODS"))
    }
    if cpu > 100 {
        errs = append(errs, fmt.Errorf("CPU utilization (%%) cannot exceed 100"))
    }
    return errors.NewAggregate(errs)
}
// ValidatePolicy checks for errors in the Config.
// It does not return early so that it can find as many errors as possible.
func ValidatePolicy(policy schedulerapi.Policy) error {
    validationErrors := make([]error, 0)

    for _, priority := range policy.Priorities {
        if priority.Weight <= 0 {
            validationErrors = append(validationErrors, fmt.Errorf("Priority %s should have a positive weight applied to it", priority.Name))
        }
    }
    return errors.NewAggregate(validationErrors)
}
// NewForbidden is a utility function to return a well-formatted admission control error response
func NewForbidden(a Attributes, internalError error) error {
    // do not double wrap an error of same type
    if apierrors.IsForbidden(internalError) {
        return internalError
    }
    name, kind, err := extractKindName(a)
    if err != nil {
        return apierrors.NewInternalError(errs.NewAggregate([]error{internalError, err}))
    }
    return apierrors.NewForbidden(kind, name, internalError)
}
// ValidateParams ensures that all required params are present in the params map
func ValidateParams(paramSpec []GeneratorParam, params map[string]interface{}) error {
    allErrs := []error{}
    for ix := range paramSpec {
        if paramSpec[ix].Required {
            value, found := params[paramSpec[ix].Name]
            if !found || IsZero(value) {
                allErrs = append(allErrs, fmt.Errorf("Parameter: %s is required", paramSpec[ix].Name))
            }
        }
    }
    return errors.NewAggregate(allErrs)
}
// Ensures the system container is created and all non-kernel threads and process 1
// without a container are moved to it.
//
// The reason for leaving kernel threads at the root cgroup is that we don't want to tie the
// execution of these threads with the to-be-defined /system quota and create priority inversions.
//
// The reason for leaving process 1 at the root cgroup is that libcontainer hardcodes
// the base cgroup path based on process 1. Please see:
// https://github.com/kubernetes/kubernetes/issues/12789#issuecomment-132384126
// for a detailed explanation.
func ensureSystemContainer(rootContainer *fs.Manager, manager *fs.Manager) error {
    // Move non-kernel PIDs to the system container.
    attemptsRemaining := 10
    var errs []error
    for attemptsRemaining >= 0 {
        // Only keep errors on the latest attempt.
        errs = []error{}
        attemptsRemaining--

        allPids, err := rootContainer.GetPids()
        if err != nil {
            errs = append(errs, fmt.Errorf("failed to list PIDs for root: %v", err))
            continue
        }

        // Remove kernel pids and process 1
        pids := make([]int, 0, len(allPids))
        for _, pid := range allPids {
            if isKernelPid(pid) {
                continue
            }
            // TODO(dawnchen): Remove this once the hard dependency on process 1 is removed
            // on systemd node.
            if pid == 1 {
                continue
            }
            pids = append(pids, pid)
        }
        glog.Infof("Found %d PIDs in root, %d of them are kernel related", len(allPids), len(allPids)-len(pids))

        // Check if we moved all the non-kernel PIDs.
        if len(pids) == 0 {
            break
        }

        glog.Infof("Moving non-kernel threads: %v", pids)
        for _, pid := range pids {
            err := manager.Apply(pid)
            if err != nil {
                errs = append(errs, fmt.Errorf("failed to move PID %d into the system container %q: %v", pid, manager.Cgroups.Name, err))
                continue
            }
        }
    }
    if attemptsRemaining < 0 {
        errs = append(errs, fmt.Errorf("ran out of attempts to create system containers %q", manager.Cgroups.Name))
    }
    return errors.NewAggregate(errs)
}
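// ensureSystemContainer above retries a whole pass and keeps only the errors from
// the final attempt. A compact, hypothetical sketch of that retry-then-aggregate
// shape (moveAll, work, and the integer worklist are all stand-ins, not names
// from the original code):
package main

import (
    "fmt"

    utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func moveAll(work func(item int) error, items []int, attempts int) error {
    var errs []error
    for attempts >= 0 {
        // Reset so only the latest attempt's failures are reported.
        errs = nil
        attempts--

        remaining := items[:0:0]
        for _, item := range items {
            if err := work(item); err != nil {
                errs = append(errs, err)
                remaining = append(remaining, item)
            }
        }
        if len(remaining) == 0 {
            break
        }
        items = remaining
    }
    if attempts < 0 {
        errs = append(errs, fmt.Errorf("ran out of attempts"))
    }
    return utilerrors.NewAggregate(errs)
}

func main() {
    calls := 0
    // Fails on the first pass, succeeds on the second.
    flaky := func(item int) error {
        calls++
        if calls <= 2 {
            return fmt.Errorf("item %d not ready", item)
        }
        return nil
    }
    fmt.Println(moveAll(flaky, []int{1, 2}, 10)) // <nil>
}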
func (p dockerPuller) Pull(image string, secrets []api.Secret) error {
    repoToPull, tag := parseImageName(image)

    // If no tag was specified, use the default "latest".
    if len(tag) == 0 {
        tag = "latest"
    }

    opts := docker.PullImageOptions{
        Repository: repoToPull,
        Tag:        tag,
    }

    keyring, err := credentialprovider.MakeDockerKeyring(secrets, p.keyring)
    if err != nil {
        return err
    }

    creds, haveCredentials := keyring.Lookup(repoToPull)
    if !haveCredentials {
        glog.V(1).Infof("Pulling image %s without credentials", image)

        err := p.client.PullImage(opts, docker.AuthConfiguration{})
        if err == nil {
            return nil
        }

        // Image spec: [<registry>/]<repository>/<image>[:<version>] so we count '/'
        explicitRegistry := (strings.Count(image, "/") == 2)
        // Hack, look for a private registry, and decorate the error with the lack of
        // credentials. This is heuristic, and really probably could be done better
        // by talking to the registry API directly from the kubelet here.
        if explicitRegistry {
            return fmt.Errorf("image pull failed for %s, this may be because there are no credentials on this request. details: (%v)", image, err)
        }
        return filterHTTPError(err, image)
    }

    var pullErrs []error
    for _, currentCreds := range creds {
        err := p.client.PullImage(opts, currentCreds)
        // If there was no error, return success
        if err == nil {
            return nil
        }

        pullErrs = append(pullErrs, filterHTTPError(err, image))
    }

    return utilerrors.NewAggregate(pullErrs)
}
// Authorize checks a chain of authorizer.Authorizer objects; it returns nil on the
// first success and an aggregate error if every authorizer in the chain fails.
func (authzHandler unionAuthzHandler) Authorize(a authorizer.Attributes) error {
    var errlist []error
    for _, currAuthzHandler := range authzHandler {
        err := currAuthzHandler.Authorize(a)
        if err != nil {
            errlist = append(errlist, err)
            continue
        }
        return nil
    }

    return errors.NewAggregate(errlist)
}
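// The union handler above follows a reusable shape: try each element in order,
// short-circuit on the first success, and surface an aggregate only when every
// element failed. A standalone sketch of the same control flow; the Checker type
// is hypothetical, standing in for authorizer.Authorizer:
package main

import (
    "fmt"

    utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

type Checker func() error

// authorizeUnion returns nil as soon as any checker approves; the aggregate
// carries every failure only when none of them did.
func authorizeUnion(checkers []Checker) error {
    var errlist []error
    for _, check := range checkers {
        if err := check(); err != nil {
            errlist = append(errlist, err)
            continue
        }
        return nil
    }
    return utilerrors.NewAggregate(errlist)
}

func main() {
    deny := func() error { return fmt.Errorf("denied") }
    allow := func() error { return nil }
    fmt.Println(authorizeUnion([]Checker{deny, allow})) // <nil>
    fmt.Println(authorizeUnion([]Checker{deny, deny}))  // both denials, aggregated
}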
// collectData collects the requested downwardAPI values into a data map.
// The map's key is the requested name of the file to dump;
// the map's value is the (sorted) content of the field to be dumped in the file.
func (d *downwardAPIVolume) collectData() (map[string]string, error) {
    errlist := []error{}
    data := make(map[string]string)
    for fieldReference, fileName := range d.fieldReferenceFileNames {
        if values, err := fieldpath.ExtractFieldPathAsString(d.pod, fieldReference); err != nil {
            glog.Errorf("Unable to extract field %s: %s", fieldReference, err.Error())
            errlist = append(errlist, err)
        } else {
            data[fileName] = sortLines(values)
        }
    }
    return data, utilErrors.NewAggregate(errlist)
}
func RunDescribe(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, options *DescribeOptions) error {
    selector := cmdutil.GetFlagString(cmd, "selector")
    cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
    if err != nil {
        return err
    }
    if len(args) == 0 && len(options.Filenames) == 0 {
        fmt.Fprint(out, "You must specify the type of resource to describe. ", valid_resources)
        return cmdutil.UsageError(cmd, "Required resource not specified.")
    }

    mapper, typer := f.Object()
    r := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()).
        ContinueOnError().
        NamespaceParam(cmdNamespace).DefaultNamespace().
        FilenameParam(enforceNamespace, options.Filenames...).
        SelectorParam(selector).
        ResourceTypeOrNameArgs(true, args...).
        Flatten().
        Do()
    err = r.Err()
    if err != nil {
        return err
    }

    allErrs := []error{}
    infos, err := r.Infos()
    if err != nil {
        if apierrors.IsNotFound(err) && len(args) == 2 {
            return DescribeMatchingResources(mapper, typer, f, cmdNamespace, args[0], args[1], out, err)
        }
        allErrs = append(allErrs, err)
    }

    for _, info := range infos {
        mapping := info.ResourceMapping()
        describer, err := f.Describer(mapping)
        if err != nil {
            allErrs = append(allErrs, err)
            continue
        }
        s, err := describer.Describe(info.Namespace, info.Name)
        if err != nil {
            allErrs = append(allErrs, err)
            continue
        }
        fmt.Fprintf(out, "%s\n\n", s)
    }

    return utilerrors.NewAggregate(allErrs)
}
// Load starts by running the MigrationRules and then
// takes the loading rules and returns a Config object based on the following rules:
// if the ExplicitPath is set, return the unmerged explicit file;
// otherwise, return a merged config based on the Precedence slice.
// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
// Read errors or files with non-deserializable content produce errors.
// The first file to set a particular map key wins and the map key's value is never changed.
// BUT, if you set a struct value that is NOT contained inside of a map, the value WILL be changed.
// This results in some odd-looking logic to merge in one direction, merge in the other, and then merge the two.
// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even
// non-conflicting entries from the second file's "red-user" are discarded.
// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder
// and only absolute file paths are returned.
func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
    if err := rules.Migrate(); err != nil {
        return nil, err
    }

    errlist := []error{}

    kubeConfigFiles := []string{}
    // Make sure a file we were explicitly told to use exists
    if len(rules.ExplicitPath) > 0 {
        if _, err := os.Stat(rules.ExplicitPath); os.IsNotExist(err) {
            return nil, err
        }
        kubeConfigFiles = append(kubeConfigFiles, rules.ExplicitPath)
    } else {
        kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...)
    }

    // first merge all of our maps
    mapConfig := clientcmdapi.NewConfig()
    for _, file := range kubeConfigFiles {
        if err := mergeConfigWithFile(mapConfig, file); err != nil {
            errlist = append(errlist, err)
        }
    }

    // merge all of the struct values in reverse order so that priority is given correctly
    // errors are not added to the list the second time
    nonMapConfig := clientcmdapi.NewConfig()
    for i := len(kubeConfigFiles) - 1; i >= 0; i-- {
        file := kubeConfigFiles[i]
        mergeConfigWithFile(nonMapConfig, file)
    }

    // since values are overwritten, but map values are not, we can merge the non-map config on top of the map config and
    // get the values we expect.
    config := clientcmdapi.NewConfig()
    mergo.Merge(config, mapConfig)
    mergo.Merge(config, nonMapConfig)

    if rules.ResolvePaths() {
        if err := ResolveLocalPaths(config); err != nil {
            errlist = append(errlist, err)
        }
    }

    return config, errors.NewAggregate(errlist)
}
func (s *SwaggerSchema) ValidateBytes(data []byte) error {
    var obj interface{}
    out, err := yaml.ToJSON(data)
    if err != nil {
        return err
    }
    data = out
    if err := json.Unmarshal(data, &obj); err != nil {
        return err
    }
    fields, ok := obj.(map[string]interface{})
    if !ok {
        return fmt.Errorf("error in unmarshaling data %s", string(data))
    }
    groupVersion := fields["apiVersion"]
    if groupVersion == nil {
        return fmt.Errorf("apiVersion not set")
    }
    if _, ok := groupVersion.(string); !ok {
        return fmt.Errorf("apiVersion isn't string type")
    }
    kind := fields["kind"]
    if kind == nil {
        return fmt.Errorf("kind not set")
    }
    if _, ok := kind.(string); !ok {
        return fmt.Errorf("kind isn't string type")
    }
    if strings.HasSuffix(kind.(string), "List") {
        return utilerrors.NewAggregate(s.validateList(fields))
    }
    version := apiutil.GetVersion(groupVersion.(string))
    allErrs := s.ValidateObject(obj, "", version+"."+kind.(string))
    if len(allErrs) == 1 {
        return allErrs[0]
    }
    return utilerrors.NewAggregate(allErrs)
}
// AuthenticateRequest authenticates the request using a chain of authenticator.Request objects. The first
// success returns that identity. Errors are only returned if no matches are found.
func (authHandler unionAuthRequestHandler) AuthenticateRequest(req *http.Request) (user.Info, bool, error) {
    var errlist []error
    for _, currAuthRequestHandler := range authHandler {
        info, ok, err := currAuthRequestHandler.AuthenticateRequest(req)
        if err != nil {
            errlist = append(errlist, err)
            continue
        }

        if ok {
            return info, true, nil
        }
    }

    return nil, false, errors.NewAggregate(errlist)
}
// writeDataInTimestampDir writes the latest data into a new temporary directory with a timestamp.
func (d *downwardAPIVolume) writeDataInTimestampDir(data map[string]string) (string, error) {
    errlist := []error{}
    timestampDir, err := ioutil.TempDir(d.GetPath(), ".."+time.Now().Format("2006_01_02_15_04_05"))
    if err != nil {
        glog.Errorf("Unable to create timestamp directory: %s", err.Error())
        return "", err
    }
    for fileName, values := range data {
        fullPathFile := path.Join(timestampDir, fileName)
        dir, _ := filepath.Split(fullPathFile)
        if err = os.MkdirAll(dir, os.ModePerm); err != nil {
            glog.Errorf("Unable to create directory `%s`: %s", dir, err.Error())
            return "", err
        }
        if err := ioutil.WriteFile(fullPathFile, []byte(values), 0644); err != nil {
            glog.Errorf("Unable to write file `%s`: %s", fullPathFile, err.Error())
            errlist = append(errlist, err)
        }
    }
    return timestampDir, utilErrors.NewAggregate(errlist)
}
// Visit implements Visitor, and gathers errors that occur during processing until
// all sub visitors have been visited.
func (l EagerVisitorList) Visit(fn VisitorFunc) error {
    errs := []error(nil)
    for i := range l {
        if err := l[i].Visit(func(info *Info, err error) error {
            if err != nil {
                errs = append(errs, err)
                return nil
            }
            if err := fn(info, nil); err != nil {
                errs = append(errs, err)
            }
            return nil
        }); err != nil {
            errs = append(errs, err)
        }
    }
    return utilerrors.NewAggregate(errs)
}
// UpdateREST registers the REST handlers for this APIGroupVersion to an existing web service
// in the restful Container. It will use the prefix (root/version) to find the existing
// web service. If a web service does not exist within the container to support the prefix
// this method will return an error.
func (g *APIGroupVersion) UpdateREST(container *restful.Container) error {
    installer := g.newInstaller()
    var ws *restful.WebService
    for i, s := range container.RegisteredWebServices() {
        if s.RootPath() == installer.prefix {
            ws = container.RegisteredWebServices()[i]
            break
        }
    }
    if ws == nil {
        return apierrors.NewInternalError(fmt.Errorf("unable to find an existing webservice for prefix %s", installer.prefix))
    }
    apiResources, registrationErrors := installer.Install(ws)
    // TODO: g.Version only contains "version" now, it will contain "group/version" in the near future.
    AddSupportedResourcesWebService(ws, g.Version, apiResources)
    return errors.NewAggregate(registrationErrors)
}
// Flush all of our custom iptables rules.
func iptablesFlush(ipt iptables.Interface) error {
    el := []error{}
    if err := ipt.FlushChain(iptables.TableNAT, iptablesContainerPortalChain); err != nil {
        el = append(el, err)
    }
    if err := ipt.FlushChain(iptables.TableNAT, iptablesHostPortalChain); err != nil {
        el = append(el, err)
    }
    if err := ipt.FlushChain(iptables.TableNAT, iptablesContainerNodePortChain); err != nil {
        el = append(el, err)
    }
    if err := ipt.FlushChain(iptables.TableNAT, iptablesHostNodePortChain); err != nil {
        el = append(el, err)
    }
    if len(el) != 0 {
        glog.Errorf("Some errors flushing old iptables portals: %v", el)
    }
    return errors.NewAggregate(el)
}
// Ensures that the Docker daemon is in the desired container.
func ensureDockerInContainer(cadvisor cadvisor.Interface, oomScoreAdj int, manager *fs.Manager) error {
    // What container is Docker in?
    out, err := exec.Command("pidof", "docker").Output()
    if err != nil {
        return fmt.Errorf("failed to find pid of Docker container: %v", err)
    }

    // The output of pidof is a list of pids.
    // Docker may be forking and thus there would be more than one result.
    pids := []int{}
    for _, pidStr := range strings.Split(strings.TrimSpace(string(out)), " ") {
        pid, err := strconv.Atoi(pidStr)
        if err != nil {
            continue
        }
        pids = append(pids, pid)
    }

    // Move if the pid is not already in the desired container.
    errs := []error{}
    for _, pid := range pids {
        cont, err := getContainer(pid)
        if err != nil {
            errs = append(errs, fmt.Errorf("failed to find container of PID %d: %v", pid, err))
        }

        if cont != manager.Cgroups.Name {
            err = manager.Apply(pid)
            if err != nil {
                errs = append(errs, fmt.Errorf("failed to move PID %d (in %q) to %q", pid, cont, manager.Cgroups.Name))
            }
        }

        // Also apply oom-score-adj to processes
        oomAdjuster := oom.NewOomAdjuster()
        if err := oomAdjuster.ApplyOomScoreAdj(pid, oomScoreAdj); err != nil {
            errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d", oomScoreAdj, pid))
        }
    }
    return errors.NewAggregate(errs)
}