Example no. 1
func (l *ListPodsOptions) Run() error {
	nodes, err := l.Options.GetNodes()
	if err != nil {
		return err
	}

	var printer kubectl.ResourcePrinter
	if l.Options.CmdPrinterOutput {
		printer = l.Options.CmdPrinter
	} else {
		printer, err = l.Options.GetPrintersByResource(unversioned.GroupVersionResource{Resource: "pod"})
		if err != nil {
			return err
		}
	}

	// determine if printer kind is json or yaml and modify output
	// to combine all pod lists into a single list
	if l.Options.CmdPrinterOutput {
		errs := l.handleRESTOutput(nodes, printer)
		return kerrors.NewAggregate(errs)
	}

	errList := []error{}
	for _, node := range nodes {
		err := l.runListPods(node, printer)
		if err != nil {
			// Don't bail out if one node fails
			errList = append(errList, err)
		}
	}
	return kerrors.NewAggregate(errList)
}
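
Every example in this collection follows the same shape: keep iterating after individual failures, collect the errors in a slice, and collapse them into a single error at the end (the aggregate helper returns nil for an empty list). Below is a minimal, self-contained sketch of that pattern using only the standard library; errors.Join (Go 1.20+) plays a broadly analogous role here, and processNode is a hypothetical stand-in for the per-node work.

package main

import (
	"errors"
	"fmt"
)

// processNode is a hypothetical per-item operation that may fail.
func processNode(name string) error {
	if name == "bad" {
		return fmt.Errorf("node %q: processing failed", name)
	}
	return nil
}

// processAll keeps going after individual failures and reports them all at once.
func processAll(names []string) error {
	var errs []error
	for _, name := range names {
		if err := processNode(name); err != nil {
			// Don't bail out if one item fails; remember the error and continue.
			errs = append(errs, err)
		}
	}
	// errors.Join returns nil when errs is empty, mirroring the aggregate helpers above.
	return errors.Join(errs...)
}

func main() {
	fmt.Println(processAll([]string{"good", "bad", "also-good"}))
}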
Example no. 2
func (g *configRESTOptionsGetter) loadWatchCacheSettings() error {
	if g.masterOptions.KubernetesMasterConfig == nil {
		return nil
	}

	server := apiserveroptions.NewAPIServer()
	if errs := cmdflags.Resolve(g.masterOptions.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(errs) > 0 {
		return kerrors.NewAggregate(errs)
	}

	g.cacheEnabled = server.EnableWatchCache

	errs := []error{}
	for _, c := range server.WatchCacheSizes {
		tokens := strings.Split(c, "#")
		if len(tokens) != 2 {
			errs = append(errs, fmt.Errorf("invalid watch cache size value '%s', expecting <resource>#<size> format (e.g. builds#100)", c))
			continue
		}

		resource := unversioned.ParseGroupResource(tokens[0])

		size, err := strconv.Atoi(tokens[1])
		if err != nil {
			errs = append(errs, fmt.Errorf("invalid watch cache size value '%s': %v", c, err))
			continue
		}

		g.cacheSizes[resource] = size
	}
	return kerrors.NewAggregate(errs)
}
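
The <resource>#<size> parsing above is easy to lift out and exercise on its own. The following is a hedged sketch using only the standard library; parseWatchCacheSizes is a hypothetical helper and the plain string key stands in for unversioned.ParseGroupResource.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseWatchCacheSizes parses entries of the form <resource>#<size> (e.g. "builds#100"),
// reporting every malformed entry instead of stopping at the first.
func parseWatchCacheSizes(entries []string) (map[string]int, []error) {
	sizes := map[string]int{}
	var errs []error
	for _, entry := range entries {
		tokens := strings.Split(entry, "#")
		if len(tokens) != 2 {
			errs = append(errs, fmt.Errorf("invalid watch cache size value %q, expecting <resource>#<size> format (e.g. builds#100)", entry))
			continue
		}
		size, err := strconv.Atoi(tokens[1])
		if err != nil {
			errs = append(errs, fmt.Errorf("invalid watch cache size value %q: %v", entry, err))
			continue
		}
		sizes[tokens[0]] = size
	}
	return sizes, errs
}

func main() {
	sizes, errs := parseWatchCacheSizes([]string{"builds#100", "pods#abc", "bogus"})
	fmt.Println(sizes, errs)
}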
Example no. 3
func (o *ReconcileClusterRoleBindingsOptions) RunReconcileClusterRoleBindings(cmd *cobra.Command, f *clientcmd.Factory) error {
	changedClusterRoleBindings, fetchErr := o.ChangedClusterRoleBindings()
	if fetchErr != nil && !IsClusterRoleBindingLookupError(fetchErr) {
		// we got an error that isn't due to a partial match, so we can't continue
		return fetchErr
	}

	if len(changedClusterRoleBindings) == 0 {
		return fetchErr
	}

	if (len(o.Output) != 0) && !o.Confirmed {
		list := &kapi.List{}
		for _, item := range changedClusterRoleBindings {
			list.Items = append(list.Items, item)
		}
		mapper, _ := f.Object(false)
		fn := cmdutil.VersionedPrintObject(f.PrintObject, cmd, mapper, o.Out)
		if err := fn(list); err != nil {
			return kutilerrors.NewAggregate([]error{fetchErr, err})
		}
	}

	if o.Confirmed {
		if err := o.ReplaceChangedRoleBindings(changedClusterRoleBindings); err != nil {
			return kutilerrors.NewAggregate([]error{fetchErr, err})
		}
	}

	return fetchErr
}
Example no. 4
// Resolve resolves the provided input and returns only exact matches
func (r PerfectMatchWeightedResolver) Resolve(value string) (*ComponentMatch, error) {
	var errs []error
	candidates := ScoredComponentMatches{}
	var group MultiSimpleSearcher
	var groupWeight float32 = 0.0
	for i, resolver := range r {
		// lump all resolvers with the same weight into a single group
		if len(group) == 0 || resolver.Weight == groupWeight {
			group = append(group, resolver.Searcher)
			groupWeight = resolver.Weight
			if i != len(r)-1 && r[i+1].Weight == groupWeight {
				continue
			}
		}
		matches, err := group.Search(true, value)
		if err != nil {
			glog.V(5).Infof("Error from resolver: %v\n", err)
			errs = append(errs, err...)
		}

		sort.Sort(ScoredComponentMatches(matches))
		if len(matches) > 0 && matches[0].Score == 0.0 && (len(matches) == 1 || matches[1].Score != 0.0) {
			return matches[0], errors.NewAggregate(errs)
		}
		for _, m := range matches {
			if groupWeight != 0.0 {
				m.Score = groupWeight * m.Score
			}
			candidates = append(candidates, m)
		}
		group = nil
	}

	switch len(candidates) {
	case 0:
		return nil, ErrNoMatch{Value: value, Errs: errs}
	case 1:
		if candidates[0].Score != 0.0 {
			if candidates[0].NoTagsFound {
				return nil, ErrNoTagsFound{Value: value, Match: candidates[0], Errs: errs}
			}
			return nil, ErrPartialMatch{Value: value, Match: candidates[0], Errs: errs}
		}
		return candidates[0], errors.NewAggregate(errs)
	default:
		sort.Sort(candidates)
		if candidates[0].Score < candidates[1].Score {
			if candidates[0].Score != 0.0 {
				return nil, ErrPartialMatch{Value: value, Match: candidates[0], Errs: errs}
			}
			return candidates[0], errors.NewAggregate(errs)
		}
		return nil, ErrMultipleMatches{Value: value, Matches: candidates, Errs: errs}
	}
}
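
The subtle part of this resolver is the grouping loop: consecutive resolvers with the same weight are searched as a single batch. A small sketch of just that grouping logic, with weightedSearcher as an invented stand-in for the weighted resolver entries:

package main

import "fmt"

// weightedSearcher is a stand-in for the resolver entries above, which pair a searcher with a weight.
type weightedSearcher struct {
	Name   string
	Weight float32
}

// groupByWeight splits a weight-sorted slice into consecutive runs that share
// the same weight, which is what the loop above does before each Search call.
func groupByWeight(items []weightedSearcher) [][]weightedSearcher {
	var groups [][]weightedSearcher
	var current []weightedSearcher
	for i, item := range items {
		current = append(current, item)
		// Close the group when the next item has a different weight or we're at the end.
		if i == len(items)-1 || items[i+1].Weight != item.Weight {
			groups = append(groups, current)
			current = nil
		}
	}
	return groups
}

func main() {
	items := []weightedSearcher{{"a", 0}, {"b", 0}, {"c", 0.5}, {"d", 1}}
	for _, g := range groupByWeight(items) {
		fmt.Println(g)
	}
}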
Example no. 5
func (g *configRESTOptionsGetter) loadSettings() error {
	server := apiserveroptions.NewAPIServer()
	if g.masterOptions.KubernetesMasterConfig != nil {
		if errs := cmdflags.Resolve(g.masterOptions.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(errs) > 0 {
			return kerrors.NewAggregate(errs)
		}
	}

	storageGroupsToEncodingVersion, err := server.StorageGroupsToEncodingVersion()
	if err != nil {
		return err
	}

	storageConfig := server.StorageConfig
	storageConfig.Prefix = g.masterOptions.EtcdStorageConfig.OpenShiftStoragePrefix
	storageConfig.ServerList = g.masterOptions.EtcdClientInfo.URLs
	storageConfig.KeyFile = g.masterOptions.EtcdClientInfo.ClientCert.KeyFile
	storageConfig.CertFile = g.masterOptions.EtcdClientInfo.ClientCert.CertFile
	storageConfig.CAFile = g.masterOptions.EtcdClientInfo.CA

	storageFactory, err := genericapiserver.BuildDefaultStorageFactory(
		storageConfig, server.DefaultStorageMediaType, kapi.Codecs,
		genericapiserver.NewDefaultResourceEncodingConfig(), storageGroupsToEncodingVersion,
		nil,
		g.defaultResourceConfig, server.RuntimeConfig)
	if err != nil {
		return err
	}
	storageFactory.DefaultResourcePrefixes = g.defaultResourcePrefixes
	g.storageFactory = storageFactory

	g.cacheEnabled = server.EnableWatchCache

	errs := []error{}
	for _, c := range server.WatchCacheSizes {
		tokens := strings.Split(c, "#")
		if len(tokens) != 2 {
			errs = append(errs, fmt.Errorf("invalid watch cache size value '%s', expecting <resource>#<size> format (e.g. builds#100)", c))
			continue
		}

		resource := unversioned.ParseGroupResource(tokens[0])

		size, err := strconv.Atoi(tokens[1])
		if err != nil {
			errs = append(errs, fmt.Errorf("invalid watch cache size value '%s': %v", c, err))
			continue
		}

		g.cacheSizes[resource] = size
	}
	return kerrors.NewAggregate(errs)
}
Example no. 6
// Prune identifies images eligible for pruning, invoking imagePruner for each image, and then it identifies
// image configs and layers eligible for pruning, invoking layerLinkPruner for each registry URL that has
// layers or configs that can be pruned.
func (p *pruner) Prune(
	imagePruner ImageDeleter,
	streamPruner ImageStreamDeleter,
	layerLinkPruner LayerLinkDeleter,
	blobPruner BlobDeleter,
	manifestPruner ManifestDeleter,
) error {
	allNodes := p.g.Nodes()

	imageNodes := getImageNodes(allNodes)
	if len(imageNodes) == 0 {
		return nil
	}

	registryURL, err := p.determineRegistry(imageNodes)
	if err != nil {
		return fmt.Errorf("unable to determine registry: %v", err)
	}
	glog.V(1).Infof("Using registry: %s", registryURL)

	if err := p.registryPinger.ping(registryURL); err != nil {
		return fmt.Errorf("error communicating with registry: %v", err)
	}

	prunableImageNodes, prunableImageIDs := calculatePrunableImages(p.g, imageNodes)

	errs := []error{}
	errs = append(errs, pruneStreams(p.g, prunableImageNodes, streamPruner)...)
	// if a namespace is specified, prune only ImageStreams and nothing more
	if len(p.algorithm.namespace) > 0 {
		return kerrors.NewAggregate(errs)
	}

	graphWithoutPrunableImages := subgraphWithoutPrunableImages(p.g, prunableImageIDs)
	prunableComponents := calculatePrunableImageComponents(graphWithoutPrunableImages)
	errs = append(errs, pruneImageComponents(p.g, p.registryClient, registryURL, prunableComponents, layerLinkPruner)...)
	errs = append(errs, pruneBlobs(p.g, p.registryClient, registryURL, prunableComponents, blobPruner)...)
	errs = append(errs, pruneManifests(p.g, p.registryClient, registryURL, prunableImageNodes, manifestPruner)...)

	if len(errs) > 0 {
		// If we had any errors removing image references from image streams or deleting
		// layers, blobs, or manifest data from the registry, stop here and don't
		// delete any images. This way, you can rerun prune and retry things that failed.
		return kerrors.NewAggregate(errs)
	}

	errs = append(errs, pruneImages(p.g, prunableImageNodes, imagePruner)...)
	return kerrors.NewAggregate(errs)
}
Example no. 7
// ServerPreferredNamespacedGroupVersionResources uses the specified client to discover the set of preferred groupVersionResources that are namespaced
func ServerPreferredNamespacedGroupVersionResources(discoveryClient discovery.DiscoveryInterface) ([]unversioned.GroupVersionResource, error) {
	results := []unversioned.GroupVersionResource{}
	serverGroupList, err := discoveryClient.ServerGroups()
	if err != nil {
		return results, err
	}

	allErrs := []error{}
	for _, apiGroup := range serverGroupList.Groups {
		preferredVersion := apiGroup.PreferredVersion
		apiResourceList, err := discoveryClient.ServerResourcesForGroupVersion(preferredVersion.GroupVersion)
		if err != nil {
			allErrs = append(allErrs, err)
			continue
		}
		groupVersion := unversioned.GroupVersion{Group: apiGroup.Name, Version: preferredVersion.Version}
		for _, apiResource := range apiResourceList.APIResources {
			if !apiResource.Namespaced {
				continue
			}
			if strings.Contains(apiResource.Name, "/") {
				continue
			}
			results = append(results, groupVersion.WithResource(apiResource.Name))
		}
	}
	return results, utilerrors.NewAggregate(allErrs)
}
Example no. 8
// resolve the references to ensure they are all valid, and identify any images that don't match user input.
func (c *AppConfig) resolve(components app.ComponentReferences) error {
	errs := []error{}
	for _, ref := range components {
		if err := ref.Resolve(); err != nil {
			errs = append(errs, err)
			continue
		}
		switch input := ref.Input(); {
		case !input.ExpectToBuild && input.ResolvedMatch.Builder:
			if c.Strategy != "docker" {
				glog.Infof("Image %q is a builder, so a repository will be expected unless you also specify --strategy=docker", input)
				input.ExpectToBuild = true
			}
		case input.ExpectToBuild && input.ResolvedMatch.IsTemplate():
			// TODO: harder - break the template pieces and check if source code can be attached (look for a build config, build image, etc)
			errs = append(errs, fmt.Errorf("template with source code explicitly attached is not supported - you must either specify the template and source code separately or attach an image to the source code using the '[image]~[code]' form"))
			continue
		case input.ExpectToBuild && !input.ResolvedMatch.Builder && !input.Uses.IsDockerBuild():
			if len(c.Strategy) == 0 {
				errs = append(errs, fmt.Errorf("none of the images that match %q can build source code - check whether this is the image you want to use, then use --strategy=source to build using source or --strategy=docker to treat this as a Docker base image and set up a layered Docker build", ref))
				continue
			}
		}
	}
	return errors.NewAggregate(errs)
}
Example no. 9
// tagImages tags images from the deployment
func (e *HookExecutor) tagImages(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, suffix, label string) error {
	var errs []error
	for _, action := range hook.TagImages {
		value, ok := findContainerImage(deployment, action.ContainerName)
		if !ok {
			errs = append(errs, fmt.Errorf("unable to find image for container %q, container could not be found", action.ContainerName))
			continue
		}
		namespace := action.To.Namespace
		if len(namespace) == 0 {
			namespace = deployment.Namespace
		}
		if _, err := e.tags.ImageStreamTags(namespace).Update(&imageapi.ImageStreamTag{
			ObjectMeta: kapi.ObjectMeta{
				Name:      action.To.Name,
				Namespace: namespace,
			},
			Tag: &imageapi.TagReference{
				From: &kapi.ObjectReference{
					Kind: "DockerImage",
					Name: value,
				},
			},
		}); err != nil {
			errs = append(errs, err)
			continue
		}
		fmt.Fprintf(e.out, "--> %s: Tagged %q into %s/%s\n", label, value, namespace, action.To.Name)
	}

	return utilerrors.NewAggregate(errs)
}
Example no. 10
// addDockerfile adds a Dockerfile passed in the command line to the reference
// builder.
func (c *AppConfig) addDockerfile() error {
	if len(c.Strategy) != 0 && c.Strategy != "docker" {
		return fmt.Errorf("when directly referencing a Dockerfile, the strategy must must be 'docker'")
	}
	_, repos, errs := c.refBuilder.Result()
	if err := errors.NewAggregate(errs); err != nil {
		return err
	}
	switch len(repos) {
	case 0:
		// Create a new SourceRepository with the Dockerfile.
		repo, err := app.NewSourceRepositoryForDockerfile(c.Dockerfile)
		if err != nil {
			return fmt.Errorf("provided Dockerfile is not valid: %v", err)
		}
		c.refBuilder.AddExistingSourceRepository(repo)
	case 1:
		// Add the Dockerfile to the existing SourceRepository, so that
		// eventually we generate a single BuildConfig with multiple
		// sources.
		if err := repos[0].AddDockerfile(c.Dockerfile); err != nil {
			return fmt.Errorf("provided Dockerfile is not valid: %v", err)
		}
	default:
		// Invalid.
		return fmt.Errorf("--dockerfile cannot be used with multiple source repositories")
	}
	return nil
}
Example no. 11
func (b *Builder) visitorResult() *Result {
	if len(b.errs) > 0 {
		return &Result{err: utilerrors.NewAggregate(b.errs)}
	}

	if b.selectAll {
		b.selector = labels.Everything()
	}

	// visit items specified by paths
	if len(b.paths) != 0 {
		return b.visitByPaths()
	}

	// visit selectors
	if b.selector != nil {
		return b.visitBySelector()
	}

	// visit items specified by resource and name
	if len(b.resourceTuples) != 0 {
		return b.visitByResource()
	}

	// visit items specified by name
	if len(b.names) != 0 {
		return b.visitByName()
	}

	if len(b.resources) != 0 {
		return &Result{err: fmt.Errorf("resource(s) were provided, but no name, label selector, or --all flag specified")}
	}
	return &Result{err: missingResourceError}
}
Example no. 12
func (oc *OvsController) validateServiceNetwork(networkCIDR string, hostIPNets []*net.IPNet) error {
	serviceIP, serviceIPNet, err := net.ParseCIDR(networkCIDR)
	if err != nil {
		return fmt.Errorf("Failed to parse network address: %s", networkCIDR)
	}

	errList := []error{}
	for _, ipNet := range hostIPNets {
		if ipNet.Contains(serviceIP) {
			errList = append(errList, fmt.Errorf("Error: Service IP: %s conflicts with host network: %s", ipNet.String(), networkCIDR))
		}
		if serviceIPNet.Contains(ipNet.IP) {
			errList = append(errList, fmt.Errorf("Error: Host network with IP: %s conflicts with service network: %s", ipNet.IP.String(), networkCIDR))
		}
	}

	services, _, err := oc.Registry.GetServices()
	if err != nil {
		return err
	}
	for _, svc := range services {
		if !serviceIPNet.Contains(net.ParseIP(svc.IP)) {
			errList = append(errList, fmt.Errorf("Error: Existing service with IP: %s is not part of service network: %s", svc.IP, networkCIDR))
		}
	}
	return kerrors.NewAggregate(errList)
}
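
Both this check and the cluster-network variant further below reduce to one primitive: two CIDRs overlap exactly when either network contains the other's base address (CIDR blocks are either nested or disjoint). A self-contained sketch of that test with net.ParseCIDR; cidrsOverlap is a hypothetical helper:

package main

import (
	"fmt"
	"net"
)

// cidrsOverlap reports whether two CIDRs overlap, using the same primitive as
// the validation above: either network containing the other's base IP.
func cidrsOverlap(a, b string) (bool, error) {
	ipA, netA, err := net.ParseCIDR(a)
	if err != nil {
		return false, fmt.Errorf("failed to parse network address: %s", a)
	}
	ipB, netB, err := net.ParseCIDR(b)
	if err != nil {
		return false, fmt.Errorf("failed to parse network address: %s", b)
	}
	return netA.Contains(ipB) || netB.Contains(ipA), nil
}

func main() {
	fmt.Println(cidrsOverlap("10.0.0.0/16", "10.0.1.0/24"))    // true <nil>
	fmt.Println(cidrsOverlap("10.0.0.0/16", "192.168.0.0/24")) // false <nil>
}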
Example no. 13
// ToAggregate converts the ErrorList into an errors.Aggregate.
func (list ErrorList) ToAggregate() utilerrors.Aggregate {
	errs := make([]error, len(list))
	for i := range list {
		errs[i] = list[i]
	}
	return utilerrors.NewAggregate(errs)
}
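
The copy loop in ToAggregate exists because Go does not implicitly convert a slice of a concrete error type into []error; each element has to be re-boxed into the interface type. A minimal sketch of that constraint, using a hypothetical fieldError type and errors.Join in place of the aggregate helper:

package main

import (
	"errors"
	"fmt"
)

// fieldError is a hypothetical concrete error type, playing the role of the ErrorList elements above.
type fieldError struct{ field, msg string }

func (e *fieldError) Error() string { return fmt.Sprintf("%s: %s", e.field, e.msg) }

func toAggregate(list []*fieldError) error {
	// A []*fieldError is not a []error, so each element is copied into an
	// interface-typed slice before aggregating.
	errs := make([]error, len(list))
	for i := range list {
		errs[i] = list[i]
	}
	return errors.Join(errs...)
}

func main() {
	fmt.Println(toAggregate([]*fieldError{{"spec.name", "required"}, {"spec.replicas", "must be positive"}}))
}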
Example no. 14
func statusCausesToAggrError(scs []api.StatusCause) utilerrors.Aggregate {
	errs := make([]error, len(scs))
	for i, sc := range scs {
		errs[i] = fmt.Errorf("%s: %s", sc.Field, sc.Message)
	}
	return utilerrors.NewAggregate(errs)
}
Example no. 15
// collectData collects the requested downward API fields into a data map.
// The map's key is the requested name of the file to dump;
// the map's value is the (sorted) content of the field to be dumped in the file.
func (d *downwardAPIVolume) collectData() (map[string][]byte, error) {
	errlist := []error{}
	data := make(map[string][]byte)
	for _, fileInfo := range d.items {
		if fileInfo.FieldRef != nil {
			if values, err := fieldpath.ExtractFieldPathAsString(d.pod, fileInfo.FieldRef.FieldPath); err != nil {
				glog.Errorf("Unable to extract field %s: %s", fileInfo.FieldRef.FieldPath, err.Error())
				errlist = append(errlist, err)
			} else {
				data[path.Clean(fileInfo.Path)] = []byte(sortLines(values))
			}
		} else if fileInfo.ResourceFieldRef != nil {
			containerName := fileInfo.ResourceFieldRef.ContainerName
			nodeAllocatable, err := d.plugin.host.GetNodeAllocatable()
			if err != nil {
				errlist = append(errlist, err)
			} else if values, err := fieldpath.ExtractResourceValueByContainerNameAndNodeAllocatable(fileInfo.ResourceFieldRef, d.pod, containerName, nodeAllocatable); err != nil {
				glog.Errorf("Unable to extract field %s: %s", fileInfo.ResourceFieldRef.Resource, err.Error())
				errlist = append(errlist, err)
			} else {
				data[path.Clean(fileInfo.Path)] = []byte(sortLines(values))
			}
		}
	}
	return data, utilerrors.NewAggregate(errlist)
}
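
The sortLines helper is not shown in this excerpt. A plausible sketch, assuming it only sorts the newline-separated values so the generated file content is deterministic regardless of map iteration order:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// sortLines sorts newline-separated values (an assumption about the helper used above,
// which keeps file content stable for map-backed fields such as labels and annotations).
func sortLines(values string) string {
	lines := strings.Split(values, "\n")
	sort.Strings(lines)
	return strings.Join(lines, "\n")
}

func main() {
	fmt.Println(sortLines("b=2\na=1\nc=3"))
}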
Example no. 16
// reorganizeTaints returns the updated set of taints, taking into account old taints that were not updated,
// old taints that were updated, old taints that were deleted, and new taints.
func reorganizeTaints(accessor meta.Object, overwrite bool, taintsToAdd []api.Taint, taintsToRemove []api.Taint) ([]api.Taint, error) {
	newTaints := append([]api.Taint{}, taintsToAdd...)

	var oldTaints []api.Taint
	var err error
	annotations := accessor.GetAnnotations()
	if annotations != nil {
		if oldTaints, err = api.GetTaintsFromNodeAnnotations(annotations); err != nil {
			return nil, err
		}
	}

	// add old taints that already exist and were not updated to newTaints
	for _, oldTaint := range oldTaints {
		existsInNew := false
		for _, taint := range newTaints {
			if taint.MatchTaint(oldTaint) {
				existsInNew = true
				break
			}
		}
		if !existsInNew {
			newTaints = append(newTaints, oldTaint)
		}
	}

	allErrs := []error{}
	for _, taintToRemove := range taintsToRemove {
		newTaints, err = deleteTaint(newTaints, taintToRemove)
		if err != nil {
			allErrs = append(allErrs, err)
		}
	}
	return newTaints, utilerrors.NewAggregate(allErrs)
}
Example no. 17
func (node *OsdnNode) updatePodNetwork(namespace string, netID uint32) error {
	// Update OF rules for the existing/old pods in the namespace
	pods, err := node.GetLocalPods(namespace)
	if err != nil {
		return err
	}
	for _, pod := range pods {
		err = node.UpdatePod(pod.Namespace, pod.Name, kubetypes.DockerID(getPodContainerID(&pod)))
		if err != nil {
			return err
		}
	}

	// Update OF rules for the old services in the namespace
	services, err := node.registry.GetServicesForNamespace(namespace)
	if err != nil {
		return err
	}
	errList := []error{}
	for _, svc := range services {
		if err = node.DeleteServiceRules(&svc); err != nil {
			log.Error(err)
		}
		if err = node.AddServiceRules(&svc, netID); err != nil {
			errList = append(errList, err)
		}
	}
	return kerrors.NewAggregate(errList)
}
Example no. 18
// setupKernelTunables validates kernel tunable flags are set as expected
// depending upon the specified option, it will either warn, error, or modify the kernel tunable flags
func setupKernelTunables(option KernelTunableBehavior) error {
	desiredState := map[string]int{
		utilsysctl.VmOvercommitMemory: utilsysctl.VmOvercommitMemoryAlways,
		utilsysctl.VmPanicOnOOM:       utilsysctl.VmPanicOnOOMInvokeOOMKiller,
		utilsysctl.KernelPanic:        utilsysctl.KernelPanicRebootTimeout,
		utilsysctl.KernelPanicOnOops:  utilsysctl.KernelPanicOnOopsAlways,
	}

	errList := []error{}
	for flag, expectedValue := range desiredState {
		val, err := utilsysctl.GetSysctl(flag)
		if err != nil {
			errList = append(errList, err)
			continue
		}
		if val == expectedValue {
			continue
		}

		switch option {
		case KernelTunableError:
			errList = append(errList, fmt.Errorf("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val))
		case KernelTunableWarn:
			glog.V(2).Infof("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
		case KernelTunableModify:
			glog.V(2).Infof("Updating kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
			err = utilsysctl.SetSysctl(flag, expectedValue)
			if err != nil {
				errList = append(errList, err)
			}
		}
	}
	return utilerrors.NewAggregate(errList)
}
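
For context, sysctl helpers like the GetSysctl/SetSysctl calls above are commonly implemented on top of the /proc/sys interface, where the dotted flag name maps to a file path. A hedged sketch of that idea (not the actual utilsysctl code; writing normally requires root):

package main

import (
	"fmt"
	"os"
	"path"
	"strconv"
	"strings"
)

// sysctlPath maps a dotted sysctl name such as "vm.overcommit_memory" to its /proc/sys file.
func sysctlPath(name string) string {
	return path.Join("/proc/sys", strings.Replace(name, ".", "/", -1))
}

// getSysctl reads an integer sysctl value.
func getSysctl(name string) (int, error) {
	data, err := os.ReadFile(sysctlPath(name))
	if err != nil {
		return -1, err
	}
	return strconv.Atoi(strings.TrimSpace(string(data)))
}

// setSysctl writes an integer sysctl value; it typically requires root privileges.
func setSysctl(name string, value int) error {
	return os.WriteFile(sysctlPath(name), []byte(strconv.Itoa(value)), 0644)
}

func main() {
	v, err := getSysctl("vm.overcommit_memory")
	fmt.Println(v, err)
}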
Example no. 19
func (oc *OvsController) validateClusterNetwork(networkCIDR string, subnetsInUse []string, hostIPNets []*net.IPNet) error {
	clusterIP, clusterIPNet, err := net.ParseCIDR(networkCIDR)
	if err != nil {
		return fmt.Errorf("Failed to parse network address: %s", networkCIDR)
	}

	errList := []error{}
	for _, ipNet := range hostIPNets {
		if ipNet.Contains(clusterIP) {
			errList = append(errList, fmt.Errorf("Error: Cluster IP: %s conflicts with host network: %s", clusterIP.String(), ipNet.String()))
		}
		if clusterIPNet.Contains(ipNet.IP) {
			errList = append(errList, fmt.Errorf("Error: Host network with IP: %s conflicts with cluster network: %s", ipNet.IP.String(), networkCIDR))
		}
	}

	for _, netStr := range subnetsInUse {
		subnetIP, _, err := net.ParseCIDR(netStr)
		if err != nil {
			errList = append(errList, fmt.Errorf("Failed to parse network address: %s", netStr))
			continue
		}
		if !clusterIPNet.Contains(subnetIP) {
			errList = append(errList, fmt.Errorf("Error: Existing node subnet: %s is not part of cluster network: %s", netStr, networkCIDR))
		}
	}
	return kerrors.NewAggregate(errList)
}
Example no. 20
func ensureProcessInContainer(pid int, oomScoreAdj int, manager *fs.Manager) error {
	if runningInHost, err := isProcessRunningInHost(pid); err != nil {
		// Err on the side of caution. Avoid moving the docker daemon unless we are able to identify its context.
		return err
	} else if !runningInHost {
		// Process is running inside a container. Don't touch that.
		return nil
	}

	var errs []error
	cont, err := getContainer(pid)
	if err != nil {
		errs = append(errs, fmt.Errorf("failed to find container of PID %d: %v", pid, err))
	}

	if cont != manager.Cgroups.Name {
		err = manager.Apply(pid)
		if err != nil {
			errs = append(errs, fmt.Errorf("failed to move PID %d (in %q) to %q", pid, cont, manager.Cgroups.Name))
		}
	}

	// Also apply oom-score-adj to processes
	oomAdjuster := oom.NewOOMAdjuster()
	if err := oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err != nil {
		errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d", oomScoreAdj, pid))
	}
	return utilerrors.NewAggregate(errs)
}
Example no. 21
// InitPlugins initializes each plugin.  All plugins must have unique names.
// This must be called exactly once before any New* methods are called on any
// plugins.
func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, host VolumeHost) error {
	pm.mutex.Lock()
	defer pm.mutex.Unlock()

	if pm.plugins == nil {
		pm.plugins = map[string]VolumePlugin{}
	}

	allErrs := []error{}
	for _, plugin := range plugins {
		name := plugin.Name()
		if errs := validation.IsQualifiedName(name); len(errs) != 0 {
			allErrs = append(allErrs, fmt.Errorf("volume plugin has invalid name: %q: %s", name, strings.Join(errs, ";")))
			continue
		}

		if _, found := pm.plugins[name]; found {
			allErrs = append(allErrs, fmt.Errorf("volume plugin %q was registered more than once", name))
			continue
		}
		err := plugin.Init(host)
		if err != nil {
			glog.Errorf("Failed to load volume plugin %s, error: %s", plugin, err.Error())
			allErrs = append(allErrs, err)
			continue
		}
		pm.plugins[name] = plugin
		glog.V(1).Infof("Loaded volume plugin %q", name)
	}
	return utilerrors.NewAggregate(allErrs)
}
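
The registration loop above is a general pattern: validate the name, reject duplicates, and keep collecting failures so one bad plugin does not hide the rest. A compact sketch with a hypothetical plugin interface and errors.Join standing in for the aggregate helper:

package main

import (
	"errors"
	"fmt"
)

// plugin is a hypothetical stand-in for the volume plugin interface above.
type plugin interface {
	Name() string
}

type fakePlugin string

func (f fakePlugin) Name() string { return string(f) }

// register validates names, rejects duplicates, and reports every failure at once.
func register(registry map[string]plugin, plugins []plugin) error {
	var errs []error
	for _, p := range plugins {
		name := p.Name()
		if name == "" {
			errs = append(errs, fmt.Errorf("plugin has an empty name"))
			continue
		}
		if _, found := registry[name]; found {
			errs = append(errs, fmt.Errorf("plugin %q was registered more than once", name))
			continue
		}
		registry[name] = p
	}
	return errors.Join(errs...)
}

func main() {
	registry := map[string]plugin{}
	err := register(registry, []plugin{fakePlugin("nfs"), fakePlugin("nfs"), fakePlugin("")})
	fmt.Println(len(registry), err)
}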
Example no. 22
// ScopesToRules takes the scopes and returns the rules back.  We ALWAYS add the discovery rules and it is possible to get some rules and
// an error since errors aren't fatal to evaluation
func ScopesToRules(scopes []string, namespace string, clusterPolicyGetter rulevalidation.ClusterPolicyGetter) ([]authorizationapi.PolicyRule, error) {
	rules := append([]authorizationapi.PolicyRule{}, authorizationapi.DiscoveryRule)

	errors := []error{}
	for _, scope := range scopes {
		found := false

		for _, evaluator := range ScopeEvaluators {
			if evaluator.Handles(scope) {
				found = true
				currRules, err := evaluator.ResolveRules(scope, namespace, clusterPolicyGetter)
				if err != nil {
					errors = append(errors, err)
					continue
				}

				rules = append(rules, currRules...)
			}
		}

		if !found {
			errors = append(errors, fmt.Errorf("no scope evaluator found for %q", scope))
		}
	}

	return rules, kutilerrors.NewAggregate(errors)
}
Example no. 23
// validate converts all of the arguments on the config into references to objects, or returns an error
func (c *AppConfig) validate() (app.ComponentReferences, app.SourceRepositories, cmdutil.Environment, cmdutil.Environment, error) {
	b := c.refBuilder
	c.addReferenceBuilderComponents(b)
	b.AddGroups(c.Groups)
	refs, repos, errs := b.Result()

	if len(c.Strategy) != 0 && len(repos) == 0 {
		errs = append(errs, fmt.Errorf("when --strategy is specified you must provide at least one source code location"))
	}

	if c.BinaryBuild && (len(repos) > 0 || refs.HasSource()) {
		errs = append(errs, fmt.Errorf("specifying binary builds and source repositories at the same time is not allowed"))
	}

	env, duplicateEnv, envErrs := cmdutil.ParseEnvironmentArguments(c.Environment)
	for _, s := range duplicateEnv {
		glog.V(1).Infof("The environment variable %q was overwritten", s)
	}
	errs = append(errs, envErrs...)

	parms, duplicateParms, parmsErrs := cmdutil.ParseEnvironmentArguments(c.TemplateParameters)
	for _, s := range duplicateParms {
		glog.V(1).Infof("The template parameter %q was overwritten", s)
	}
	errs = append(errs, parmsErrs...)

	return refs, repos, env, parms, errors.NewAggregate(errs)
}
Example no. 24
func validateScopeRestrictions(client *oauthapi.OAuthClient, scope string) error {
	errs := []error{}

	if len(client.ScopeRestrictions) == 0 {
		return nil
	}

	for _, restriction := range client.ScopeRestrictions {
		if len(restriction.ExactValues) > 0 {
			if err := ValidateLiteralScopeRestrictions(scope, restriction.ExactValues); err != nil {
				errs = append(errs, err)
				continue
			}
			return nil
		}

		if restriction.ClusterRole != nil {
			if !clusterRoleEvaluatorInstance.Handles(scope) {
				continue
			}
			if err := ValidateClusterRoleScopeRestrictions(scope, *restriction.ClusterRole); err != nil {
				errs = append(errs, err)
				continue
			}
			return nil
		}
	}

	// if we got here, then nothing matched. If we already have errors, do nothing; otherwise add one so the validation reports a failure.
	if len(errs) == 0 {
		errs = append(errs, fmt.Errorf("%v did not match any scope restriction", scope))
	}

	return kutilerrors.NewAggregate(errs)
}
Example no. 25
// validate converts all of the arguments on the config into references to objects, or returns an error
func (c *AppConfig) validate() (app.ComponentReferences, app.SourceRepositories, cmdutil.Environment, cmdutil.Environment, error) {
	b := c.refBuilder
	c.addReferenceBuilderComponents(b)
	b.AddGroups(c.Groups)
	refs, repos, errs := b.Result()

	if len(repos) > 0 {
		repos[0].SetContextDir(c.ContextDir)
		if len(repos) > 1 {
			glog.Warningf("You have specified more than one source repository and a context directory. "+
				"The context directory will be applied to the first repository: %q", repos[0])
		}
	}

	if len(c.Strategy) != 0 && len(repos) == 0 {
		errs = append(errs, fmt.Errorf("when --strategy is specified you must provide at least one source code location"))
	}

	env, duplicateEnv, envErrs := cmdutil.ParseEnvironmentArguments(c.Environment)
	for _, s := range duplicateEnv {
		glog.V(1).Infof("The environment variable %q was overwritten", s)
	}
	errs = append(errs, envErrs...)

	parms, duplicateParms, parmsErrs := cmdutil.ParseEnvironmentArguments(c.TemplateParameters)
	for _, s := range duplicateParms {
		glog.V(1).Infof("The template parameter %q was overwritten", s)
	}
	errs = append(errs, parmsErrs...)

	return refs, repos, env, parms, errors.NewAggregate(errs)
}
Example no. 26
File: sync.go Project: richm/origin
// openshiftGroupNamesOnlyList returns a list that contains only the names of the groups.
// Since Group.Name cannot contain '/', the split is safe.  Any resource ref that is not a group
// results in an error.
func openshiftGroupNamesOnlyList(list []string) ([]string, error) {
	ret := []string{}
	errs := []error{}

	for _, curr := range list {
		tokens := strings.SplitN(curr, "/", 2)
		if len(tokens) == 1 {
			ret = append(ret, tokens[0])
			continue
		}

		if tokens[0] != "group" && tokens[0] != "groups" {
			errs = append(errs, fmt.Errorf("%q is not a valid OpenShift group", curr))
			continue
		}

		ret = append(ret, tokens[1])
	}

	if len(errs) > 0 {
		return nil, kerrs.NewAggregate(errs)
	}

	return ret, nil
}
Example no. 27
// buildTemplates converts a set of resolved, valid references into references to template objects.
func (c *AppConfig) buildTemplates(components app.ComponentReferences, environment app.Environment) ([]runtime.Object, error) {
	objects := []runtime.Object{}

	for _, ref := range components {
		tpl := ref.Input().ResolvedMatch.Template

		glog.V(4).Infof("processing template %s/%s", c.originNamespace, tpl.Name)
		for _, env := range environment.List() {
			// only set environment values that match what's expected by the template.
			if v := template.GetParameterByName(tpl, env.Name); v != nil {
				v.Value = env.Value
				v.Generate = ""
				template.AddParameter(tpl, *v)
			} else {
				return nil, fmt.Errorf("unexpected parameter name %q", env.Name)
			}
		}

		result, err := c.osclient.TemplateConfigs(c.originNamespace).Create(tpl)
		if err != nil {
			return nil, fmt.Errorf("error processing template %s/%s: %v", c.originNamespace, tpl.Name, err)
		}
		errs := runtime.DecodeList(result.Objects, kapi.Scheme)
		if len(errs) > 0 {
			err = errors.NewAggregate(errs)
			return nil, fmt.Errorf("error processing template %s/%s: %v", c.originNamespace, tpl.Name, errs)
		}
		objects = append(objects, result.Objects...)
	}
	return objects, nil
}
Example no. 28
// AuthenticateRequest authenticates the request using presented client certificates
func (a *Authenticator) AuthenticateRequest(req *http.Request) (user.Info, bool, error) {
	if req.TLS == nil {
		return nil, false, nil
	}

	var errlist []error
	for _, cert := range req.TLS.PeerCertificates {
		chains, err := cert.Verify(a.opts)
		if err != nil {
			errlist = append(errlist, err)
			continue
		}

		for _, chain := range chains {
			user, ok, err := a.user.User(chain)
			if err != nil {
				errlist = append(errlist, err)
				continue
			}

			if ok {
				return user, ok, err
			}
		}
	}
	return nil, false, utilerrors.NewAggregate(errlist)
}
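
cert.Verify is the standard crypto/x509 call; a hedged, self-contained sketch of verifying a client certificate against a CA pool (verifyClientCert is a hypothetical helper and the PEM/DER inputs are placeholders):

package main

import (
	"crypto/x509"
	"fmt"
)

// verifyClientCert checks a presented client certificate against a CA pool and
// returns the verified chains, mirroring the crypto/x509 call used above.
func verifyClientCert(caPEM []byte, certDER []byte) ([][]*x509.Certificate, error) {
	roots := x509.NewCertPool()
	if !roots.AppendCertsFromPEM(caPEM) {
		return nil, fmt.Errorf("no CA certificates could be parsed")
	}
	cert, err := x509.ParseCertificate(certDER)
	if err != nil {
		return nil, err
	}
	opts := x509.VerifyOptions{
		Roots:     roots,
		KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	}
	return cert.Verify(opts)
}

func main() {
	_, err := verifyClientCert(nil, nil) // placeholders only; real PEM/DER bytes are required
	fmt.Println(err)
}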
Example no. 29
func instantiateTemplate(client client.Interface, mapper configcmd.Mapper, templateNamespace, templateName, targetNamespace string, params map[string]string) error {
	template, err := client.Templates(templateNamespace).Get(templateName)
	if err != nil {
		return errors.NewError("cannot retrieve template %q from namespace %q", templateName, templateNamespace).WithCause(err)
	}

	// process the template
	result, err := genappcmd.TransformTemplate(template, client, targetNamespace, params)
	if err != nil {
		return errors.NewError("cannot process template %s/%s", templateNamespace, templateName).WithCause(err)
	}

	// Create objects
	bulk := &configcmd.Bulk{
		Mapper: mapper,
		Op:     configcmd.Create,
	}
	itemsToCreate := &kapi.List{
		Items: result.Objects,
	}
	if errs := bulk.Run(itemsToCreate, targetNamespace); len(errs) > 0 {
		err = kerrors.NewAggregate(errs)
		return errors.NewError("cannot create objects from template %s/%s", templateNamespace, templateName).WithCause(err)
	}

	return nil
}
Example no. 30
// cleanupDeployment is responsible for cleaning up a deployment, i.e. it retains all but the latest N old replica sets,
// where N=deployment.Spec.RevisionHistoryLimit. Old replica sets are older versions of the pod template of a deployment, kept
// around by default 1) for historical reasons and 2) for the ability to roll back a deployment.
func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) error {
	if deployment.Spec.RevisionHistoryLimit == nil {
		return nil
	}
	diff := int32(len(oldRSs)) - *deployment.Spec.RevisionHistoryLimit
	if diff <= 0 {
		return nil
	}

	sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))

	var errList []error
	// TODO: This should be parallelized.
	for i := int32(0); i < diff; i++ {
		rs := oldRSs[i]
		// Avoid deleting replica sets with non-zero replica counts
		if rs.Status.Replicas != 0 || *(rs.Spec.Replicas) != 0 || rs.Generation > rs.Status.ObservedGeneration {
			continue
		}
		if err := dc.client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
			glog.V(2).Infof("Failed deleting old replica set %v for deployment %v: %v", rs.Name, deployment.Name, err)
			errList = append(errList, err)
		}
	}

	return utilerrors.NewAggregate(errList)
}
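
The retention logic above is easy to state in isolation: sort ascending by creation time and delete from the front until only RevisionHistoryLimit items remain, skipping anything still in use. A minimal sketch with a hypothetical replicaSet stand-in:

package main

import (
	"fmt"
	"sort"
	"time"
)

// replicaSet is a stand-in for the real ReplicaSet type; only the fields the retention logic needs are modeled.
type replicaSet struct {
	Name     string
	Created  time.Time
	Replicas int32
}

// namesToDelete returns the oldest replica sets beyond the history limit,
// skipping any that still have replicas (mirroring the check above).
func namesToDelete(oldRSs []replicaSet, historyLimit int32) []string {
	diff := int32(len(oldRSs)) - historyLimit
	if diff <= 0 {
		return nil
	}
	sort.Slice(oldRSs, func(i, j int) bool { return oldRSs[i].Created.Before(oldRSs[j].Created) })

	var out []string
	for i := int32(0); i < diff; i++ {
		if oldRSs[i].Replicas != 0 {
			continue
		}
		out = append(out, oldRSs[i].Name)
	}
	return out
}

func main() {
	now := time.Now()
	rss := []replicaSet{
		{"rs-3", now, 0},
		{"rs-1", now.Add(-2 * time.Hour), 0},
		{"rs-2", now.Add(-time.Hour), 1},
	}
	fmt.Println(namesToDelete(rss, 1)) // [rs-1]; rs-2 is kept because it still has replicas
}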