// nameMatches checks whether the resourceName of the action is in the specified whitelist. An empty whitelist indicates that any name is allowed.
// An empty string in the whitelist should only match the action's resourceName if the resourceName itself is the empty string. This behavior allows for the
// combination of a whitelist for gets in the same rule as a list that won't have a resourceName. I don't recommend writing such a rule, but we do
// handle it like you'd expect: the whitelist is respected for gets while not preventing the list you explicitly asked for.
func (a DefaultAuthorizationAttributes) nameMatches(allowedResourceNames util.StringSet) bool {
	if len(allowedResourceNames) == 0 {
		return true
	}

	return allowedResourceNames.Has(a.GetResourceName())
}
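// A minimal sketch (not in the original source) of the whitelist semantics documented above,
// written against util.StringSet directly; matchesWhitelist and util.NewStringSet are assumed
// illustrative names, not names from this package.
func matchesWhitelist(allowedResourceNames util.StringSet, resourceName string) bool {
	if len(allowedResourceNames) == 0 {
		return true // empty whitelist: any name is allowed
	}
	return allowedResourceNames.Has(resourceName)
}

// matchesWhitelist(util.StringSet{}, "anything")         == true  (empty whitelist allows all)
// matchesWhitelist(util.NewStringSet("foo", ""), "foo")  == true  (whitelisted get)
// matchesWhitelist(util.NewStringSet("foo", ""), "")     == true  (list with no resourceName)
// matchesWhitelist(util.NewStringSet("foo", ""), "bar")  == false (get outside the whitelist)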
func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) {
	names := util.StringSet{}
	for i, pod := range pods {
		var errlist []error
		if errs := validation.ValidatePod(pod); len(errs) != 0 {
			errlist = append(errlist, errs...)
			// If validation fails, don't trust it any further -
			// even Name could be bad.
		} else {
			name := kubecontainer.GetPodFullName(pod)
			if names.Has(name) {
				errlist = append(errlist, fielderrors.NewFieldDuplicate("name", pod.Name))
			} else {
				names.Insert(name)
			}
		}
		if len(errlist) > 0 {
			name := bestPodIdentString(pod)
			err := utilerrors.NewAggregate(errlist)
			glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
			recorder.Eventf(pod, "failedValidation", "Error validating pod %s from %s, ignoring: %v", name, source, err)
			continue
		}
		filtered = append(filtered, pod)
	}
	return
}
func validatePorts(ports []api.Port) errs.ValidationErrorList {
	allErrs := errs.ValidationErrorList{}

	allNames := util.StringSet{}
	for i := range ports {
		pErrs := errs.ValidationErrorList{}
		port := &ports[i] // so we can set default values
		if len(port.Name) > 0 {
			if len(port.Name) > 63 || !util.IsDNSLabel(port.Name) {
				pErrs = append(pErrs, errs.NewFieldInvalid("name", port.Name, ""))
			} else if allNames.Has(port.Name) {
				pErrs = append(pErrs, errs.NewFieldDuplicate("name", port.Name))
			} else {
				allNames.Insert(port.Name)
			}
		}
		if port.ContainerPort == 0 {
			pErrs = append(pErrs, errs.NewFieldRequired("containerPort", port.ContainerPort))
		} else if !util.IsValidPortNum(port.ContainerPort) {
			pErrs = append(pErrs, errs.NewFieldInvalid("containerPort", port.ContainerPort, ""))
		}
		if port.HostPort != 0 && !util.IsValidPortNum(port.HostPort) {
			pErrs = append(pErrs, errs.NewFieldInvalid("hostPort", port.HostPort, ""))
		}
		if len(port.Protocol) == 0 {
			port.Protocol = "TCP"
		} else if !supportedPortProtocols.Has(strings.ToUpper(string(port.Protocol))) {
			pErrs = append(pErrs, errs.NewFieldNotSupported("protocol", port.Protocol))
		}
		allErrs = append(allErrs, pErrs.PrefixIndex(i)...)
	}
	return allErrs
}
// OnUpdate manages the active set of service proxies.
// Active service proxies are reinitialized if found in the update set or
// shutdown if missing from the update set.
func (proxier *Proxier) OnUpdate(services []api.Service) {
	glog.Infof("Received update notice: %+v", services)
	activeServices := util.StringSet{}
	for _, service := range services {
		activeServices.Insert(service.ID)
		info, exists := proxier.getServiceInfo(service.ID)
		if exists && info.port == service.Port {
			continue
		}
		if exists {
			proxier.StopProxy(service.ID)
		}
		glog.Infof("Adding a new service %s on port %d", service.ID, service.Port)
		listener, err := proxier.addService(service.ID, service.Port)
		if err != nil {
			glog.Infof("Failed to start listening for %s on %d", service.ID, service.Port)
			continue
		}
		proxier.setServiceInfo(service.ID, &serviceInfo{
			port:     service.Port,
			active:   true,
			listener: listener,
		})
	}
	proxier.mu.Lock()
	defer proxier.mu.Unlock()
	for name, info := range proxier.serviceMap {
		if !activeServices.Has(name) {
			proxier.stopProxyInternal(info)
		}
	}
}
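// A generic sketch (assumed, not from the original source) of the reconciliation pattern the
// comment above describes: remember every name seen in the update, then stop anything that is
// tracked locally but missing from that set. The function and parameter names are hypothetical.
func reconcileSketch(updated []string, tracked map[string]struct{}, stop func(name string)) {
	active := util.StringSet{}
	for _, name := range updated {
		active.Insert(name)
	}
	for name := range tracked {
		if !active.Has(name) {
			stop(name)
		}
	}
}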
func validateVolumes(volumes []api.Volume) (util.StringSet, errs.ValidationErrorList) {
	allErrs := errs.ValidationErrorList{}

	allNames := util.StringSet{}
	for i := range volumes {
		vol := &volumes[i] // so we can set default values
		el := errs.ValidationErrorList{}
		if vol.Source == nil {
			// TODO: Enforce that a source is set once we deprecate the implied form.
			vol.Source = &api.VolumeSource{
				EmptyDir: &api.EmptyDir{},
			}
		}
		el = validateSource(vol.Source).Prefix("source")
		if len(vol.Name) == 0 {
			el = append(el, errs.NewFieldRequired("name", vol.Name))
		} else if !util.IsDNSLabel(vol.Name) {
			el = append(el, errs.NewFieldInvalid("name", vol.Name, ""))
		} else if allNames.Has(vol.Name) {
			el = append(el, errs.NewFieldDuplicate("name", vol.Name))
		}
		if len(el) == 0 {
			allNames.Insert(vol.Name)
		} else {
			allErrs = append(allErrs, el.PrefixIndex(i)...)
		}
	}
	return allNames, allErrs
}
func validateContainers(containers []Container, volumes util.StringSet) errorList {
	allErrs := errorList{}

	allNames := util.StringSet{}
	for i := range containers {
		ctr := &containers[i] // so we can set default values
		if !util.IsDNSLabel(ctr.Name) {
			allErrs.Append(makeInvalidError("Container.Name", ctr.Name))
		} else if allNames.Has(ctr.Name) {
			allErrs.Append(makeDuplicateError("Container.Name", ctr.Name))
		} else {
			allNames.Insert(ctr.Name)
		}
		if len(ctr.Image) == 0 {
			allErrs.Append(makeInvalidError("Container.Image", ctr.Name))
		}
		allErrs.Append(validatePorts(ctr.Ports)...)
		allErrs.Append(validateEnv(ctr.Env)...)
		allErrs.Append(validateVolumeMounts(ctr.VolumeMounts, volumes)...)
	}
	// Check for colliding ports across all containers.
	// TODO(thockin): This really is dependent on the network config of the host (IP per pod?)
	// and the config of the new manifest. But we have not specced that out yet, so we'll just
	// make some assumptions for now. As of now, pods share a network namespace, which means that
	// every Port.HostPort across the whole pod must be unique.
	allErrs.Append(checkHostPortConflicts(containers)...)

	return allErrs
}
func validatePorts(ports []api.Port) errs.ValidationErrorList {
	allErrs := errs.ValidationErrorList{}

	allNames := util.StringSet{}
	for i, port := range ports {
		pErrs := errs.ValidationErrorList{}
		if len(port.Name) > 0 {
			if len(port.Name) > util.DNS1123LabelMaxLength || !util.IsDNSLabel(port.Name) {
				pErrs = append(pErrs, errs.NewFieldInvalid("name", port.Name, dnsLabelErrorMsg))
			} else if allNames.Has(port.Name) {
				pErrs = append(pErrs, errs.NewFieldDuplicate("name", port.Name))
			} else {
				allNames.Insert(port.Name)
			}
		}
		if port.ContainerPort == 0 {
			pErrs = append(pErrs, errs.NewFieldInvalid("containerPort", port.ContainerPort, portRangeErrorMsg))
		} else if !util.IsValidPortNum(port.ContainerPort) {
			pErrs = append(pErrs, errs.NewFieldInvalid("containerPort", port.ContainerPort, portRangeErrorMsg))
		}
		if port.HostPort != 0 && !util.IsValidPortNum(port.HostPort) {
			pErrs = append(pErrs, errs.NewFieldInvalid("hostPort", port.HostPort, portRangeErrorMsg))
		}
		if len(port.Protocol) == 0 {
			pErrs = append(pErrs, errs.NewFieldRequired("protocol", port.Protocol))
		} else if !supportedPortProtocols.Has(strings.ToUpper(string(port.Protocol))) {
			pErrs = append(pErrs, errs.NewFieldNotSupported("protocol", port.Protocol))
		}
		allErrs = append(allErrs, pErrs.PrefixIndex(i)...)
	}
	return allErrs
}
func validateVolumes(volumes []Volume) (util.StringSet, errs.ErrorList) {
	allErrs := errs.ErrorList{}

	allNames := util.StringSet{}
	for i := range volumes {
		vol := &volumes[i] // so we can set default values
		el := errs.ErrorList{}
		// TODO(thockin) enforce that a source is set once we deprecate the implied form.
		if vol.Source != nil {
			el = validateSource(vol.Source).Prefix("source")
		}
		if len(vol.Name) == 0 {
			el = append(el, errs.NewRequired("name", vol.Name))
		} else if !util.IsDNSLabel(vol.Name) {
			el = append(el, errs.NewInvalid("name", vol.Name))
		} else if allNames.Has(vol.Name) {
			el = append(el, errs.NewDuplicate("name", vol.Name))
		}
		if len(el) == 0 {
			allNames.Insert(vol.Name)
		} else {
			allErrs = append(allErrs, el.PrefixIndex(i)...)
		}
	}
	return allNames, allErrs
}
func validatePorts(ports []Port) errorList {
	allErrs := errorList{}

	allNames := util.StringSet{}
	for i := range ports {
		port := &ports[i] // so we can set default values
		if len(port.Name) > 0 {
			if len(port.Name) > 63 || !util.IsDNSLabel(port.Name) {
				allErrs.Append(makeInvalidError("Port.Name", port.Name))
			} else if allNames.Has(port.Name) {
				allErrs.Append(makeDuplicateError("Port.name", port.Name))
			} else {
				allNames.Insert(port.Name)
			}
		}
		if !util.IsValidPortNum(port.ContainerPort) {
			allErrs.Append(makeInvalidError("Port.ContainerPort", port.ContainerPort))
		}
		if port.HostPort == 0 {
			port.HostPort = port.ContainerPort
		} else if !util.IsValidPortNum(port.HostPort) {
			allErrs.Append(makeInvalidError("Port.HostPort", port.HostPort))
		}
		if len(port.Protocol) == 0 {
			port.Protocol = "TCP"
		} else if !supportedPortProtocols.Has(strings.ToUpper(port.Protocol)) {
			allErrs.Append(makeNotSupportedError("Port.Protocol", port.Protocol))
		}
	}
	return allErrs
}
// Pass ports=nil for all ports.
func formatEndpoints(endpoints *api.Endpoints, ports util.StringSet) string {
	if len(endpoints.Subsets) == 0 {
		return "<none>"
	}
	list := []string{}
	max := 3
	more := false
	count := 0
	for i := range endpoints.Subsets {
		ss := &endpoints.Subsets[i]
		for i := range ss.Ports {
			port := &ss.Ports[i]
			if ports == nil || ports.Has(port.Name) {
				for i := range ss.Addresses {
					if len(list) == max {
						more = true
					}
					addr := &ss.Addresses[i]
					if !more {
						list = append(list, fmt.Sprintf("%s:%d", addr.IP, port.Port))
					}
					count++
				}
			}
		}
	}
	ret := strings.Join(list, ",")
	if more {
		return fmt.Sprintf("%s + %d more...", ret, count-max)
	}
	return ret
}
func validateVolumeMounts(mounts []VolumeMount, volumes util.StringSet) errorList {
	allErrs := errorList{}

	for i := range mounts {
		mnt := &mounts[i] // so we can set default values
		if len(mnt.Name) == 0 {
			allErrs.Append(makeInvalidError("VolumeMount.Name", mnt.Name))
		} else if !volumes.Has(mnt.Name) {
			allErrs.Append(makeNotFoundError("VolumeMount.Name", mnt.Name))
		}
		if len(mnt.MountPath) == 0 {
			// Backwards compat.
			if len(mnt.Path) == 0 {
				allErrs.Append(makeInvalidError("VolumeMount.MountPath", mnt.MountPath))
			} else {
				glog.Warning("DEPRECATED: VolumeMount.Path has been replaced by VolumeMount.MountPath")
				mnt.MountPath = mnt.Path
				mnt.Path = ""
			}
		}
		if len(mnt.MountType) != 0 {
			glog.Warning("DEPRECATED: VolumeMount.MountType will be removed. The Volume struct will handle types")
		}
	}
	return allErrs
}
func (o AddSecretOptions) AddSecretsToSAMountableSecrets(serviceAccount *api.ServiceAccount) (*api.ServiceAccount, error) {
	secrets, err := o.getSecrets()
	if err != nil {
		return nil, err
	}
	if len(secrets) == 0 {
		return nil, errors.New("no secrets found")
	}

	currentSecrets := util.StringSet{}
	for _, secretRef := range serviceAccount.Secrets {
		currentSecrets.Insert(secretRef.Name)
	}

	for _, secret := range secrets {
		if currentSecrets.Has(secret.Name) {
			continue
		}

		serviceAccount.Secrets = append(serviceAccount.Secrets, api.ObjectReference{Name: secret.Name})
		currentSecrets.Insert(secret.Name)
	}

	return o.ClientInterface.ServiceAccounts(o.Namespace).Update(serviceAccount)
}
// OnUpdate manages the active set of service proxies.
// Active service proxies are reinitialized if found in the update set or
// shutdown if missing from the update set.
func (proxier *Proxier) OnUpdate(services []api.Service) {
	glog.V(4).Infof("Received update notice: %+v", services)
	activeServices := util.StringSet{}
	for _, service := range services {
		activeServices.Insert(service.Name)
		info, exists := proxier.getServiceInfo(service.Name)
		serviceIP := net.ParseIP(service.Spec.PortalIP)
		// TODO: check health of the socket? What if ProxyLoop exited?
		if exists && info.portalPort == service.Spec.Port && info.portalIP.Equal(serviceIP) {
			continue
		}
		if exists && (info.portalPort != service.Spec.Port || !info.portalIP.Equal(serviceIP) || !ipsEqual(service.Spec.PublicIPs, info.publicIP)) {
			glog.V(4).Infof("Something changed for service %q: stopping it", service.Name)
			err := proxier.closePortal(service.Name, info)
			if err != nil {
				glog.Errorf("Failed to close portal for %q: %v", service.Name, err)
			}
			err = proxier.stopProxy(service.Name, info)
			if err != nil {
				glog.Errorf("Failed to stop service %q: %v", service.Name, err)
			}
		}
		glog.V(1).Infof("Adding new service %q at %s:%d/%s (local :%d)", service.Name, serviceIP, service.Spec.Port, service.Spec.Protocol, service.Spec.ProxyPort)
		info, err := proxier.addServiceOnPort(service.Name, service.Spec.Protocol, service.Spec.ProxyPort, udpIdleTimeout)
		if err != nil {
			glog.Errorf("Failed to start proxy for %q: %v", service.Name, err)
			continue
		}
		info.portalIP = serviceIP
		info.portalPort = service.Spec.Port
		info.publicIP = service.Spec.PublicIPs
		info.sessionAffinityType = service.Spec.SessionAffinity
		// TODO: parameterize this in the types api file as an attribute of sticky session. For now it's hardcoded to 3 hours.
		info.stickyMaxAgeMinutes = 180
		glog.V(4).Infof("info: %+v", info)
		err = proxier.openPortal(service.Name, info)
		if err != nil {
			glog.Errorf("Failed to open portal for %q: %v", service.Name, err)
		}
		proxier.loadBalancer.NewService(service.Name, info.sessionAffinityType, info.stickyMaxAgeMinutes)
	}
	proxier.mu.Lock()
	defer proxier.mu.Unlock()
	for name, info := range proxier.serviceMap {
		if !activeServices.Has(name) {
			glog.V(1).Infof("Stopping service %q", name)
			err := proxier.closePortal(name, info)
			if err != nil {
				glog.Errorf("Failed to close portal for %q: %v", name, err)
			}
			err = proxier.stopProxyInternal(name, info)
			if err != nil {
				glog.Errorf("Failed to stop service %q: %v", name, err)
			}
		}
	}
}
func findKnownValue(parts []string, valueOptions util.StringSet) int {
	for i := range parts {
		if valueOptions.Has(parts[i]) {
			return i
		}
	}

	return -1
}
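// A short usage sketch (not from the original source): findKnownValue returns the index of the
// first token that appears in the allowed set, or -1 if none does. The tokens and option values
// below are hypothetical; util.NewStringSet is assumed to be the StringSet constructor used here.
func findKnownValueSketch() {
	parts := strings.Fields("oc get pods json")
	valueOptions := util.NewStringSet("json", "yaml", "template")
	if i := findKnownValue(parts, valueOptions); i >= 0 {
		fmt.Printf("found known value %q at index %d\n", parts[i], i)
	}
}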
// purgeDeletedNamespaces will remove all namespaces enumerated in a reviewRecordStore that are not in the namespace set
func purgeDeletedNamespaces(namespaceSet *util.StringSet, userSubjectRecordStore cache.Store, groupSubjectRecordStore cache.Store, reviewRecordStore cache.Store) {
	reviewRecordItems := reviewRecordStore.List()
	for i := range reviewRecordItems {
		reviewRecord := reviewRecordItems[i].(*reviewRecord)
		if !namespaceSet.Has(reviewRecord.namespace) {
			deleteNamespaceFromSubjects(userSubjectRecordStore, reviewRecord.users, reviewRecord.namespace)
			deleteNamespaceFromSubjects(groupSubjectRecordStore, reviewRecord.groups, reviewRecord.namespace)
			reviewRecordStore.Delete(reviewRecord)
		}
	}
}
func TestOrphanBuildResolver(t *testing.T) {
	activeBuildConfig := mockBuildConfig("a", "active-build-config")
	inactiveBuildConfig := mockBuildConfig("a", "inactive-build-config")

	buildConfigs := []*buildapi.BuildConfig{activeBuildConfig}
	builds := []*buildapi.Build{}

	expectedNames := util.StringSet{}
	buildStatusOptions := []buildapi.BuildStatus{
		buildapi.BuildStatusCancelled,
		buildapi.BuildStatusComplete,
		buildapi.BuildStatusError,
		buildapi.BuildStatusFailed,
		buildapi.BuildStatusNew,
		buildapi.BuildStatusPending,
		buildapi.BuildStatusRunning,
	}
	buildStatusFilter := []buildapi.BuildStatus{
		buildapi.BuildStatusCancelled,
		buildapi.BuildStatusComplete,
		buildapi.BuildStatusError,
		buildapi.BuildStatusFailed,
	}
	buildStatusFilterSet := util.StringSet{}
	for _, buildStatus := range buildStatusFilter {
		buildStatusFilterSet.Insert(string(buildStatus))
	}

	for _, buildStatusOption := range buildStatusOptions {
		builds = append(builds, withStatus(mockBuild("a", string(buildStatusOption)+"-active", activeBuildConfig), buildStatusOption))
		builds = append(builds, withStatus(mockBuild("a", string(buildStatusOption)+"-inactive", inactiveBuildConfig), buildStatusOption))
		builds = append(builds, withStatus(mockBuild("a", string(buildStatusOption)+"-orphan", nil), buildStatusOption))
		if buildStatusFilterSet.Has(string(buildStatusOption)) {
			expectedNames.Insert(string(buildStatusOption) + "-inactive")
			expectedNames.Insert(string(buildStatusOption) + "-orphan")
		}
	}

	dataSet := NewDataSet(buildConfigs, builds)
	resolver := NewOrphanBuildResolver(dataSet, buildStatusFilter)
	results, err := resolver.Resolve()
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}
	foundNames := util.StringSet{}
	for _, result := range results {
		foundNames.Insert(result.Name)
	}
	if len(foundNames) != len(expectedNames) || !expectedNames.HasAll(foundNames.List()...) {
		t.Errorf("expected %v, actual %v", expectedNames, foundNames)
	}
}
func validateIPsOrFail(c *client.Client, ns string, expectedPods []string, ips util.StringSet) {
	for _, name := range expectedPods {
		pod, err := c.Pods(ns).Get(name)
		if err != nil {
			Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err)
		}
		if !ips.Has(pod.Status.PodIP) {
			Failf("ip validation failed, expected: %v, saw: %v", ips, pod.Status.PodIP)
		}
		By(fmt.Sprintf(""))
	}
	By(fmt.Sprintf("successfully validated IPs %v against expected endpoints %v on namespace %s", ips, expectedPods, ns))
}
func appliesToUser(ruleUsers, ruleGroups util.StringSet, user user.Info) bool {
	if ruleUsers.Has(user.GetName()) {
		return true
	}

	for _, currGroup := range user.GetGroups() {
		if ruleGroups.Has(currGroup) {
			return true
		}
	}

	return false
}
// OnUpdate manages the active set of service proxies.
// Active service proxies are reinitialized if found in the update set or
// shutdown if missing from the update set.
func (proxier *Proxier) OnUpdate(services []api.Service) {
	glog.V(4).Infof("Received update notice: %+v", services)
	activeServices := util.StringSet{}
	for _, service := range services {
		activeServices.Insert(service.Name)
		info, exists := proxier.getServiceInfo(service.Name)
		serviceIP := net.ParseIP(service.Spec.PortalIP)
		// TODO: check health of the socket? What if ProxyLoop exited?
		if exists && info.isActive() && info.portalPort == service.Spec.Port && info.portalIP.Equal(serviceIP) {
			continue
		}
		if exists && (info.portalPort != service.Spec.Port || !info.portalIP.Equal(serviceIP)) {
			glog.V(4).Infof("Something changed for service %q: stopping it", service.Name)
			err := proxier.closePortal(service.Name, info)
			if err != nil {
				glog.Errorf("Failed to close portal for %q: %s", service.Name, err)
			}
			err = proxier.stopProxy(service.Name, info)
			if err != nil {
				glog.Errorf("Failed to stop service %q: %s", service.Name, err)
			}
		}
		glog.V(1).Infof("Adding new service %q at %s:%d/%s (local :%d)", service.Name, serviceIP, service.Spec.Port, service.Spec.Protocol, service.Spec.ProxyPort)
		info, err := proxier.addServiceOnPort(service.Name, service.Spec.Protocol, service.Spec.ProxyPort, udpIdleTimeout)
		if err != nil {
			glog.Errorf("Failed to start proxy for %q: %+v", service.Name, err)
			continue
		}
		info.portalIP = serviceIP
		info.portalPort = service.Spec.Port
		err = proxier.openPortal(service.Name, info)
		if err != nil {
			glog.Errorf("Failed to open portal for %q: %s", service.Name, err)
		}
	}
	proxier.mu.Lock()
	defer proxier.mu.Unlock()
	for name, info := range proxier.serviceMap {
		if !activeServices.Has(name) {
			glog.V(1).Infof("Stopping service %q", name)
			err := proxier.closePortal(name, info)
			if err != nil {
				glog.Errorf("Failed to close portal for %q: %s", name, err)
			}
			err = proxier.stopProxyInternal(name, info)
			if err != nil {
				glog.Errorf("Failed to stop service %q: %s", name, err)
			}
		}
	}
}
func (g *conversionGenerator) RepackImports(exclude util.StringSet) {
	var packages []string
	for key := range g.imports {
		packages = append(packages, key)
	}
	sort.Strings(packages)

	g.imports = make(map[string]string)
	g.shortImports = make(map[string]string)

	for _, pkg := range packages {
		if !exclude.Has(pkg) {
			g.addImportByPath(pkg)
		}
	}
}
func computeStatus(statusList []*github.CombinedStatus, requiredContexts []string) string {
	states := util.StringSet{}
	providers := util.StringSet{}

	for ix := range statusList {
		status := statusList[ix]
		glog.V(8).Infof("Checking commit: %s", *status.SHA)
		glog.V(8).Infof("Checking commit: %v", status)
		states.Insert(*status.State)

		for _, subStatus := range status.Statuses {
			glog.V(8).Infof("Found status from: %v", subStatus)
			providers.Insert(*subStatus.Context)
		}
	}

	for _, provider := range requiredContexts {
		if !providers.Has(provider) {
			glog.V(8).Infof("Failed to find %s in %v", provider, providers)
			return "incomplete"
		}
	}

	switch {
	case states.Has("pending"):
		return "pending"
	case states.Has("error"):
		return "error"
	case states.Has("failure"):
		return "failure"
	default:
		return "success"
	}
}
// Calls "udevadm trigger --action=change" for newly created "/dev/sd*" drives (exist only in the after set).
// This is a workaround for Issue #7972. Once the underlying issue has been resolved, this may be removed.
func udevadmChangeToNewDrives(sdBeforeSet util.StringSet) error {
	sdAfter, err := filepath.Glob(diskSDPattern)
	if err != nil {
		return fmt.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err)
	}

	for _, sd := range sdAfter {
		if !sdBeforeSet.Has(sd) {
			return udevadmChangeToDrive(sd)
		}
	}

	return nil
}
func getUniqueName(basename string, existingNames *util.StringSet) string {
	if !existingNames.Has(basename) {
		return basename
	}

	for i := 0; i < 100; i++ {
		trialName := fmt.Sprintf("%v-%d", basename, i)
		if !existingNames.Has(trialName) {
			return trialName
		}
	}

	return string(util.NewUUID())
}
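// A brief usage sketch (not part of the original source): getUniqueName first tries the base name,
// then "<basename>-0" through "<basename>-99", and finally falls back to a random UUID. The names
// below are hypothetical; util.NewStringSet is assumed to be the set constructor used here.
func getUniqueNameSketch() {
	existing := util.NewStringSet("app", "app-0")
	name := getUniqueName("app", &existing) // "app" and "app-0" are taken, so this yields "app-1"
	fmt.Println(name)
}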
func validateVolumes(volumes []Volume) (util.StringSet, error) {
	allNames := util.StringSet{}
	for i := range volumes {
		vol := &volumes[i] // so we can set default values
		if !util.IsDNSLabel(vol.Name) {
			return util.StringSet{}, makeInvalidError("Volume.Name", vol.Name)
		}
		if allNames.Has(vol.Name) {
			return util.StringSet{}, makeDuplicateError("Volume.Name", vol.Name)
		}
		allNames.Insert(vol.Name)
	}
	return allNames, nil
}
func TestOrphanDeploymentResolver(t *testing.T) {
	activeDeploymentConfig := mockDeploymentConfig("a", "active-deployment-config")
	inactiveDeploymentConfig := mockDeploymentConfig("a", "inactive-deployment-config")

	deploymentConfigs := []*deployapi.DeploymentConfig{activeDeploymentConfig}
	deployments := []*kapi.ReplicationController{}

	expectedNames := util.StringSet{}
	deploymentStatusOptions := []deployapi.DeploymentStatus{
		deployapi.DeploymentStatusComplete,
		deployapi.DeploymentStatusFailed,
		deployapi.DeploymentStatusNew,
		deployapi.DeploymentStatusPending,
		deployapi.DeploymentStatusRunning,
	}
	deploymentStatusFilter := []deployapi.DeploymentStatus{
		deployapi.DeploymentStatusComplete,
		deployapi.DeploymentStatusFailed,
	}
	deploymentStatusFilterSet := util.StringSet{}
	for _, deploymentStatus := range deploymentStatusFilter {
		deploymentStatusFilterSet.Insert(string(deploymentStatus))
	}

	for _, deploymentStatusOption := range deploymentStatusOptions {
		deployments = append(deployments, withStatus(mockDeployment("a", string(deploymentStatusOption)+"-active", activeDeploymentConfig), deploymentStatusOption))
		deployments = append(deployments, withStatus(mockDeployment("a", string(deploymentStatusOption)+"-inactive", inactiveDeploymentConfig), deploymentStatusOption))
		deployments = append(deployments, withStatus(mockDeployment("a", string(deploymentStatusOption)+"-orphan", nil), deploymentStatusOption))
		if deploymentStatusFilterSet.Has(string(deploymentStatusOption)) {
			expectedNames.Insert(string(deploymentStatusOption) + "-inactive")
			expectedNames.Insert(string(deploymentStatusOption) + "-orphan")
		}
	}

	dataSet := NewDataSet(deploymentConfigs, deployments)
	resolver := NewOrphanDeploymentResolver(dataSet, deploymentStatusFilter)
	results, err := resolver.Resolve()
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}
	foundNames := util.StringSet{}
	for _, result := range results {
		foundNames.Insert(result.Name)
	}
	if len(foundNames) != len(expectedNames) || !expectedNames.HasAll(foundNames.List()...) {
		t.Errorf("expected %v, actual %v", expectedNames, foundNames)
	}
}
func validateVolumes(volumes []Volume) (util.StringSet, errorList) {
	allErrs := errorList{}

	allNames := util.StringSet{}
	for i := range volumes {
		vol := &volumes[i] // so we can set default values
		if !util.IsDNSLabel(vol.Name) {
			allErrs.Append(makeInvalidError("Volume.Name", vol.Name))
		} else if allNames.Has(vol.Name) {
			allErrs.Append(makeDuplicateError("Volume.Name", vol.Name))
		} else {
			allNames.Insert(vol.Name)
		}
	}
	return allNames, allErrs
}
// TestCreatesAllowedDuringNamespaceDeletion checks to make sure that the resources in the whitelist are allowed
func TestCreatesAllowedDuringNamespaceDeletion(t *testing.T) {
	config := &origin.MasterConfig{
		KubeletClientConfig: &kclient.KubeletConfig{},
	}
	storageMap := config.GetRestStorage()
	resources := util.StringSet{}

	for resource := range storageMap {
		resources.Insert(strings.ToLower(resource))
	}

	for resource := range recommendedCreatableResources {
		if !resources.Has(resource) {
			t.Errorf("recommendedCreatableResources has resource %v, but that resource isn't registered.", resource)
		}
	}
}
func validateContainers(containers []api.Container, volumes util.StringSet) errs.ValidationErrorList {
	allErrs := errs.ValidationErrorList{}

	if len(containers) == 0 {
		return append(allErrs, errs.NewFieldRequired(""))
	}

	allNames := util.StringSet{}
	for i, ctr := range containers {
		cErrs := errs.ValidationErrorList{}
		capabilities := capabilities.Get()
		if len(ctr.Name) == 0 {
			cErrs = append(cErrs, errs.NewFieldRequired("name"))
		} else if !util.IsDNS1123Label(ctr.Name) {
			cErrs = append(cErrs, errs.NewFieldInvalid("name", ctr.Name, dns1123LabelErrorMsg))
		} else if allNames.Has(ctr.Name) {
			cErrs = append(cErrs, errs.NewFieldDuplicate("name", ctr.Name))
		} else if ctr.Privileged && !capabilities.AllowPrivileged {
			cErrs = append(cErrs, errs.NewFieldForbidden("privileged", ctr.Privileged))
		} else {
			allNames.Insert(ctr.Name)
		}
		if len(ctr.Image) == 0 {
			cErrs = append(cErrs, errs.NewFieldRequired("image"))
		}
		if ctr.Lifecycle != nil {
			cErrs = append(cErrs, validateLifecycle(ctr.Lifecycle).Prefix("lifecycle")...)
		}
		cErrs = append(cErrs, validateProbe(ctr.LivenessProbe).Prefix("livenessProbe")...)
		cErrs = append(cErrs, validateProbe(ctr.ReadinessProbe).Prefix("readinessProbe")...)
		cErrs = append(cErrs, validatePorts(ctr.Ports).Prefix("ports")...)
		cErrs = append(cErrs, validateEnv(ctr.Env).Prefix("env")...)
		cErrs = append(cErrs, validateVolumeMounts(ctr.VolumeMounts, volumes).Prefix("volumeMounts")...)
		cErrs = append(cErrs, validatePullPolicy(&ctr).Prefix("pullPolicy")...)
		cErrs = append(cErrs, validateResourceRequirements(&ctr).Prefix("resources")...)
		allErrs = append(allErrs, cErrs.PrefixIndex(i)...)
	}
	// Check for colliding ports across all containers.
	// TODO(thockin): This really is dependent on the network config of the host (IP per pod?)
	// and the config of the new manifest. But we have not specced that out yet, so we'll just
	// make some assumptions for now. As of now, pods share a network namespace, which means that
	// every Port.HostPort across the whole pod must be unique.
	allErrs = append(allErrs, checkHostPortConflicts(containers)...)

	return allErrs
}
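// A minimal sketch (not the repository's actual checkHostPortConflicts) of the cross-container
// check described in the comment above: because containers in a pod share a network namespace,
// a non-zero HostPort may appear at most once across the whole pod. The helper name, the
// string-keyed StringSet, and the "hostPort" error field are assumptions for illustration.
func hostPortConflictsSketch(containers []api.Container) errs.ValidationErrorList {
	allErrs := errs.ValidationErrorList{}
	seenHostPorts := util.StringSet{}
	for _, ctr := range containers {
		for _, port := range ctr.Ports {
			if port.HostPort == 0 {
				continue // an unset host port cannot collide
			}
			key := strconv.Itoa(port.HostPort)
			if seenHostPorts.Has(key) {
				allErrs = append(allErrs, errs.NewFieldDuplicate("hostPort", port.HostPort))
				continue
			}
			seenHostPorts.Insert(key)
		}
	}
	return allErrs
}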
func validateVolumeMounts(mounts []api.VolumeMount, volumes util.StringSet) errs.ValidationErrorList {
	allErrs := errs.ValidationErrorList{}

	for i, mnt := range mounts {
		mErrs := errs.ValidationErrorList{}
		if len(mnt.Name) == 0 {
			mErrs = append(mErrs, errs.NewFieldRequired("name", mnt.Name))
		} else if !volumes.Has(mnt.Name) {
			mErrs = append(mErrs, errs.NewFieldNotFound("name", mnt.Name))
		}
		if len(mnt.MountPath) == 0 {
			mErrs = append(mErrs, errs.NewFieldRequired("mountPath", mnt.MountPath))
		}
		allErrs = append(allErrs, mErrs.PrefixIndex(i)...)
	}
	return allErrs
}
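// A hedged sketch (not from the original source) of how the volume and mount validators fit
// together: the set of names accepted by validateVolumes is exactly the set a VolumeMount may
// reference. The wrapper name and the per-container index prefixing are assumptions here.
func validatePodStorageSketch(volumes []api.Volume, containers []api.Container) errs.ValidationErrorList {
	volumeNames, allErrs := validateVolumes(volumes)
	for i := range containers {
		allErrs = append(allErrs, validateVolumeMounts(containers[i].VolumeMounts, volumeNames).PrefixIndex(i)...)
	}
	return allErrs
}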
func (o *NewGroupOptions) AddGroup() error {
	group := &userapi.Group{}
	group.Name = o.Group

	usedNames := util.StringSet{}
	for _, user := range o.Users {
		if usedNames.Has(user) {
			continue
		}
		usedNames.Insert(user)

		group.Users = append(group.Users, user)
	}

	_, err := o.GroupClient.Create(group)
	return err
}