// AddHPAScaleRefEdges adds a scaling edge from each HorizontalPodAutoscaler to the object named
// by its scale ref, creating a synthetic node when the target is not already in the graph.
func AddHPAScaleRefEdges(g osgraph.Graph) {
	for _, node := range g.NodesByKind(kubegraph.HorizontalPodAutoscalerNodeKind) {
		hpaNode := node.(*kubegraph.HorizontalPodAutoscalerNode)

		syntheticMeta := kapi.ObjectMeta{
			Name:      hpaNode.HorizontalPodAutoscaler.Spec.ScaleRef.Name,
			Namespace: hpaNode.HorizontalPodAutoscaler.Namespace,
		}

		var groupVersionResource unversioned.GroupVersionResource
		resource := strings.ToLower(hpaNode.HorizontalPodAutoscaler.Spec.ScaleRef.Kind)
		if groupVersion, err := unversioned.ParseGroupVersion(hpaNode.HorizontalPodAutoscaler.Spec.ScaleRef.APIVersion); err == nil {
			groupVersionResource = groupVersion.WithResource(resource)
		} else {
			groupVersionResource = unversioned.GroupVersionResource{Resource: resource}
		}

		groupVersionResource, err := registered.RESTMapper().ResourceFor(groupVersionResource)
		if err != nil {
			continue
		}

		var syntheticNode graph.Node
		switch groupVersionResource.GroupResource() {
		case kapi.Resource("replicationcontrollers"):
			syntheticNode = kubegraph.FindOrCreateSyntheticReplicationControllerNode(g, &kapi.ReplicationController{ObjectMeta: syntheticMeta})
		case deployapi.Resource("deploymentconfigs"):
			syntheticNode = deploygraph.FindOrCreateSyntheticDeploymentConfigNode(g, &deployapi.DeploymentConfig{ObjectMeta: syntheticMeta})
		default:
			continue
		}

		g.AddEdge(hpaNode, syntheticNode, ScalingEdgeKind)
	}
}
// FindHPASpecsMissingScaleRefs finds all Horizontal Pod Autoscalers whose scale reference points
// to an object that doesn't exist or that the client does not have the permission to see.
func FindHPASpecsMissingScaleRefs(graph osgraph.Graph, namer osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastNode := range graph.NodesByKind(kubenodes.HorizontalPodAutoscalerNodeKind) {
		node := uncastNode.(*kubenodes.HorizontalPodAutoscalerNode)

		scaledObjects := graph.SuccessorNodesByEdgeKind(
			uncastNode,
			kubegraph.ScalingEdgeKind,
		)

		if len(scaledObjects) < 1 {
			markers = append(markers, createMissingScaleRefMarker(node, nil, namer))
			continue
		}

		for _, scaleRef := range scaledObjects {
			if existenceChecker, ok := scaleRef.(osgraph.ExistenceChecker); ok && !existenceChecker.Found() {
				// if this node is synthetic, we can't be sure that the HPA is scaling something that actually exists
				markers = append(markers, createMissingScaleRefMarker(node, scaleRef, namer))
			}
		}
	}

	return markers
}
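The createMissingScaleRefMarker helper used above is not included in these snippets. Below is a minimal sketch of one plausible implementation, following the Marker-construction pattern used throughout this section; the HPAMissingScaleRefError key, the severity, and the message wording are assumptions rather than the original code.

// createMissingScaleRefMarker builds a marker for an HPA whose scale ref target is absent.
// Illustrative sketch only; the original helper may differ.
func createMissingScaleRefMarker(hpaNode *kubenodes.HorizontalPodAutoscalerNode, scaleRef graph.Node, namer osgraph.Namer) osgraph.Marker {
	relatedNodes := []graph.Node{}
	if scaleRef != nil {
		relatedNodes = append(relatedNodes, scaleRef)
	}

	return osgraph.Marker{
		Node:         hpaNode,
		RelatedNodes: relatedNodes,

		Severity: osgraph.WarningSeverity,
		Key:      HPAMissingScaleRefError, // assumed marker key
		Message: fmt.Sprintf("%s is attempting to scale %s/%s, which doesn't exist",
			namer.ResourceName(hpaNode),
			hpaNode.HorizontalPodAutoscaler.Spec.ScaleRef.Kind,
			hpaNode.HorizontalPodAutoscaler.Spec.ScaleRef.Name),
	}
}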
// FindMissingSecrets inspects all PodSpecs for any Secret reference that is a synthetic node (not a pre-existing node in the graph)
func FindMissingSecrets(g osgraph.Graph) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastPodSpecNode := range g.NodesByKind(kubegraph.PodSpecNodeKind) {
		podSpecNode := uncastPodSpecNode.(*kubegraph.PodSpecNode)
		missingSecrets := CheckMissingMountedSecrets(g, podSpecNode)

		topLevelNode := osgraph.GetTopLevelContainerNode(g, podSpecNode)
		topLevelString := g.Name(topLevelNode)
		if resourceStringer, ok := topLevelNode.(osgraph.ResourceNode); ok {
			topLevelString = resourceStringer.ResourceString()
		}

		for _, missingSecret := range missingSecrets {
			markers = append(markers, osgraph.Marker{
				Node:         podSpecNode,
				RelatedNodes: []graph.Node{missingSecret},

				Severity: osgraph.WarningSeverity,
				Key:      UnmountableSecretWarning,
				Message: fmt.Sprintf("%s is attempting to mount a missing secret %s",
					topLevelString, missingSecret.ResourceString()),
			})
		}
	}

	return markers
}
// FindUnpushableBuildConfigs checks all build configs that will output to an IST backed by an ImageStream and checks to make sure their builds can push.
func FindUnpushableBuildConfigs(g osgraph.Graph) []osgraph.Marker {
	markers := []osgraph.Marker{}

bc:
	for _, bcNode := range g.NodesByKind(buildgraph.BuildConfigNodeKind) {
		for _, istNode := range g.SuccessorNodesByEdgeKind(bcNode, buildedges.BuildOutputEdgeKind) {
			for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(istNode, imageedges.ReferencedImageStreamGraphEdgeKind) {
				imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode)

				if len(imageStreamNode.Status.DockerImageRepository) == 0 {
					markers = append(markers, osgraph.Marker{
						Node:         bcNode,
						RelatedNodes: []graph.Node{istNode},

						Severity: osgraph.WarningSeverity,
						Key:      MissingRequiredRegistryWarning,
						Message: fmt.Sprintf("%s is pushing to %s that is using %s, but the administrator has not configured the integrated Docker registry. (oadm registry)",
							bcNode.(*buildgraph.BuildConfigNode).ResourceString(), istNode.(*imagegraph.ImageStreamTagNode).ResourceString(), imageStreamNode.ResourceString()),
					})

					continue bc
				}
			}
		}
	}

	return markers
}
// FindDeploymentConfigReadinessWarnings inspects deploymentconfigs and reports those that
// don't have readiness probes set up.
func FindDeploymentConfigReadinessWarnings(g osgraph.Graph, f osgraph.Namer, setProbeCommand string) []osgraph.Marker {
	markers := []osgraph.Marker{}

Node:
	for _, uncastDcNode := range g.NodesByKind(deploygraph.DeploymentConfigNodeKind) {
		dcNode := uncastDcNode.(*deploygraph.DeploymentConfigNode)
		if t := dcNode.DeploymentConfig.Spec.Template; t != nil && len(t.Spec.Containers) > 0 {
			for _, container := range t.Spec.Containers {
				if container.ReadinessProbe != nil {
					continue Node
				}
			}

			// All of the containers in the deployment config lack a readiness probe
			markers = append(markers, osgraph.Marker{
				Node:       uncastDcNode,
				Severity:   osgraph.WarningSeverity,
				Key:        MissingReadinessProbeWarning,
				Message:    fmt.Sprintf("%s has no readiness probe to verify pods are ready to accept traffic or ensure deployment is successful.", f.ResourceName(dcNode)),
				Suggestion: osgraph.Suggestion(fmt.Sprintf("%s %s --readiness ...", setProbeCommand, f.ResourceName(dcNode))),
			})
			continue Node
		}
	}

	return markers
}
// FindRestartingPods inspects all Pods to see if they've restarted more than the threshold
func FindRestartingPods(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastPodNode := range g.NodesByKind(kubegraph.PodNodeKind) {
		podNode := uncastPodNode.(*kubegraph.PodNode)
		pod, ok := podNode.Object().(*kapi.Pod)
		if !ok {
			continue
		}

		for _, containerStatus := range pod.Status.ContainerStatuses {
			if containerStatus.RestartCount >= RestartThreshold {
				markers = append(markers, osgraph.Marker{
					Node: podNode,

					Severity: osgraph.WarningSeverity,
					Key:      RestartingPodWarning,
					Message: fmt.Sprintf("container %q in %s has restarted %d times",
						containerStatus.Name, f.ResourceName(podNode), containerStatus.RestartCount),
				})
			}
		}
	}

	return markers
}
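RestartThreshold is referenced above but not defined in these snippets; a plausible package-level declaration is shown below, with the exact value being an assumption.

// RestartThreshold is the restart count at which FindRestartingPods starts flagging a container.
// The value here is illustrative, not taken from the original source.
const RestartThreshold = 5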
// FindMissingLivenessProbes inspects all PodSpecs for missing liveness probes and generates a list of non-duplicate markers
func FindMissingLivenessProbes(g osgraph.Graph, f osgraph.Namer, setProbeCommand string) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastPodSpecNode := range g.NodesByKind(kubegraph.PodSpecNodeKind) {
		podSpecNode := uncastPodSpecNode.(*kubegraph.PodSpecNode)
		if hasLivenessProbe(podSpecNode) {
			continue
		}

		topLevelNode := osgraph.GetTopLevelContainerNode(g, podSpecNode)

		// skip any podSpec nodes that are managed by other nodes.
		// Liveness probes should only be applied to a controlling
		// podSpec node, and not to any of its children.
		if hasControllerRefEdge(g, topLevelNode) {
			continue
		}

		topLevelString := f.ResourceName(topLevelNode)
		markers = append(markers, osgraph.Marker{
			Node:         podSpecNode,
			RelatedNodes: []graph.Node{topLevelNode},

			Severity:   osgraph.InfoSeverity,
			Key:        MissingLivenessProbeWarning,
			Message:    fmt.Sprintf("%s has no liveness probe to verify pods are still running.", topLevelString),
			Suggestion: osgraph.Suggestion(fmt.Sprintf("%s %s --liveness ...", setProbeCommand, topLevelString)),
		})
	}

	return markers
}
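The hasLivenessProbe helper used above is not shown in these snippets. A minimal sketch follows, assuming PodSpecNode embeds the underlying kapi.PodSpec and that a pod spec counts as covered once any of its containers defines a liveness probe; the original helper may differ.

// hasLivenessProbe reports whether any container in the pod spec defines a liveness probe.
// Illustrative sketch only.
func hasLivenessProbe(podSpecNode *kubegraph.PodSpecNode) bool {
	for _, container := range podSpecNode.PodSpec.Containers {
		if container.LivenessProbe != nil {
			return true
		}
	}
	return false
}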
// FindUnmountableSecrets inspects all PodSpecs for any Secret reference that isn't listed as mountable by the referenced ServiceAccount
func FindUnmountableSecrets(g osgraph.Graph) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastPodSpecNode := range g.NodesByKind(kubegraph.PodSpecNodeKind) {
		podSpecNode := uncastPodSpecNode.(*kubegraph.PodSpecNode)
		unmountableSecrets := CheckForUnmountableSecrets(g, podSpecNode)

		topLevelNode := osgraph.GetTopLevelContainerNode(g, podSpecNode)
		topLevelString := g.Name(topLevelNode)
		if resourceStringer, ok := topLevelNode.(osgraph.ResourceNode); ok {
			topLevelString = resourceStringer.ResourceString()
		}

		saString := "MISSING_SA"
		saNodes := g.SuccessorNodesByEdgeKind(podSpecNode, kubeedges.ReferencedServiceAccountEdgeKind)
		if len(saNodes) > 0 {
			saString = saNodes[0].(*kubegraph.ServiceAccountNode).ResourceString()
		}

		for _, unmountableSecret := range unmountableSecrets {
			markers = append(markers, osgraph.Marker{
				Node:         podSpecNode,
				RelatedNodes: []graph.Node{unmountableSecret},

				Severity: osgraph.WarningSeverity,
				Key:      UnmountableSecretWarning,
				Message: fmt.Sprintf("%s is attempting to mount a secret %s disallowed by %s",
					topLevelString, unmountableSecret.ResourceString(), saString),
			})
		}
	}

	return markers
}
// FindDeploymentConfigTriggerErrors checks for possible failures in deployment config
// image change triggers.
//
// Precedence of failures:
// 1. The image stream of the tag of interest does not exist.
// 2. The image stream tag does not exist but a build config points to it.
// 3. The image stream tag does not exist.
func FindDeploymentConfigTriggerErrors(g osgraph.Graph) []osgraph.Marker {
	markers := []osgraph.Marker{}

dc:
	for _, uncastDcNode := range g.NodesByKind(deploygraph.DeploymentConfigNodeKind) {
		for _, uncastIstNode := range g.PredecessorNodesByEdgeKind(uncastDcNode, deployedges.TriggersDeploymentEdgeKind) {
			if istNode := uncastIstNode.(*imagegraph.ImageStreamTagNode); !istNode.Found() {
				dcNode := uncastDcNode.(*deploygraph.DeploymentConfigNode)

				// 1. Image stream for tag of interest does not exist
				if isNode, exists := doesImageStreamExist(g, uncastIstNode); !exists {
					markers = append(markers, osgraph.Marker{
						Node:         uncastDcNode,
						RelatedNodes: []graph.Node{uncastIstNode, isNode},

						Severity: osgraph.WarningSeverity,
						Key:      MissingImageStreamWarning,
						Message: fmt.Sprintf("The image trigger for %s will have no effect because %s does not exist.",
							dcNode.ResourceString(), isNode.(*imagegraph.ImageStreamNode).ResourceString()),
					})
					continue dc
				}

				// 2. Build config points to image stream tag of interest
				if bcNode, points := buildPointsToTag(g, uncastIstNode); points {
					markers = append(markers, osgraph.Marker{
						Node:         uncastDcNode,
						RelatedNodes: []graph.Node{uncastIstNode, bcNode},

						Severity: osgraph.InfoSeverity,
						Key:      ImageStreamTagNotAvailableInfo,
						Message: fmt.Sprintf("The image trigger for %s will have no effect because %s does not exist but %s points to %s.",
							dcNode.ResourceString(), istNode.ResourceString(), bcNode.(*buildgraph.BuildConfigNode).ResourceString(), istNode.ResourceString()),
					})
					continue dc
				}

				// 3. Image stream tag of interest does not exist
				markers = append(markers, osgraph.Marker{
					Node:         uncastDcNode,
					RelatedNodes: []graph.Node{uncastIstNode},

					Severity: osgraph.WarningSeverity,
					Key:      MissingImageStreamTagWarning,
					Message: fmt.Sprintf("The image trigger for %s will have no effect because %s does not exist.",
						dcNode.ResourceString(), istNode.ResourceString()),
				})
				continue dc
			}
		}
	}

	return markers
}
// FindPendingTags inspects all imageStreamTags that serve as outputs to builds.
//
// Precedence of failures:
// 1. A build config points to the non-existent tag but no current build exists.
// 2. A build config points to the non-existent tag but the latest build has failed.
func FindPendingTags(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastIstNode := range g.NodesByKind(imagegraph.ImageStreamTagNodeKind) {
		istNode := uncastIstNode.(*imagegraph.ImageStreamTagNode)
		if !istNode.Found() {
			markers = append(markers, findPendingTagMarkers(istNode, g, f)...)
		}
	}

	return markers
}
// FindPendingTags inspects all imageStreamTags that serve as outputs to builds.
//
// Precedence of failures:
// 1. A build config points to the non-existent tag but no current build exists.
// 2. A build config points to the non-existent tag but the latest build has failed.
func FindPendingTags(g osgraph.Graph) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastIstNode := range g.NodesByKind(imagegraph.ImageStreamTagNodeKind) {
		istNode := uncastIstNode.(*imagegraph.ImageStreamTagNode)
		if bcNode, points := buildPointsToTag(g, uncastIstNode); points && !istNode.Found() {
			latestBuild := latestBuild(g, bcNode)

			// A build config points to the non-existent tag but no current build exists.
			if latestBuild == nil {
				markers = append(markers, osgraph.Marker{
					Node:         graph.Node(bcNode),
					RelatedNodes: []graph.Node{uncastIstNode},

					Severity:   osgraph.WarningSeverity,
					Key:        TagNotAvailableWarning,
					Message:    fmt.Sprintf("%s needs to be imported or created by a build.", istNode.ResourceString()),
					Suggestion: osgraph.Suggestion(fmt.Sprintf("oc start-build %s", bcNode.ResourceString())),
				})
				continue
			}

			// A build config points to the non-existent tag but something is going on with
			// the latest build.
			// TODO: Handle other build phases.
			switch latestBuild.Build.Status.Phase {
			case buildapi.BuildPhaseCancelled:
				// TODO: Add a warning here.
			case buildapi.BuildPhaseError:
				// TODO: Add a warning here.
			case buildapi.BuildPhaseComplete:
				// We should never hit this. The output of our build is missing but the build is complete.
				// Most probably the user has messed up?
			case buildapi.BuildPhaseFailed:
				// Since the tag hasn't been populated yet, we assume there hasn't been a successful
				// build so far.
				markers = append(markers, osgraph.Marker{
					Node:         graph.Node(latestBuild),
					RelatedNodes: []graph.Node{uncastIstNode, graph.Node(bcNode)},

					Severity:   osgraph.ErrorSeverity,
					Key:        LatestBuildFailedErr,
					Message:    fmt.Sprintf("%s has failed.", latestBuild.ResourceString()),
					Suggestion: osgraph.Suggestion(fmt.Sprintf("Inspect the build failure with 'oc logs %s'", latestBuild.ResourceString())),
				})
			default:
				// Do nothing when latest build is new, pending, or running.
			}
		}
	}

	return markers
}
// FindPortMappingIssues checks all routes and reports any issues related to their ports.
// Non-existent services referenced by routes are also reported here.
func FindPortMappingIssues(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastRouteNode := range g.NodesByKind(routegraph.RouteNodeKind) {
		routeNode := uncastRouteNode.(*routegraph.RouteNode)
		marker := routePortMarker(g, f, routeNode)
		if marker != nil {
			markers = append(markers, *marker)
		}
	}

	return markers
}
// FindPersistentVolumeClaimWarnings inspects deployment configs and reports warnings related to
// the persistent volume claims they use.
func FindPersistentVolumeClaimWarnings(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastDcNode := range g.NodesByKind(deploygraph.DeploymentConfigNodeKind) {
		dcNode := uncastDcNode.(*deploygraph.DeploymentConfigNode)
		marker := pvcMarker(g, f, dcNode)
		if marker != nil {
			markers = append(markers, *marker)
		}
	}

	return markers
}
// FindDeploymentConfigTriggerErrors checks for possible failures in deployment config
// image change triggers.
//
// Precedence of failures:
// 1. The image stream for the tag of interest does not exist.
// 2. The image stream tag does not exist.
func FindDeploymentConfigTriggerErrors(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastDcNode := range g.NodesByKind(deploygraph.DeploymentConfigNodeKind) {
		dcNode := uncastDcNode.(*deploygraph.DeploymentConfigNode)
		marker := ictMarker(g, f, dcNode)
		if marker != nil {
			markers = append(markers, *marker)
		}
	}

	return markers
}
// hasUnresolvedImageStreamTag returns true if any build config outputs to an ImageStreamTag whose
// backing ImageStream has no Docker image repository configured, i.e. its builds have nowhere to push.
func hasUnresolvedImageStreamTag(g osgraph.Graph) bool {
	for _, bcNode := range g.NodesByKind(buildgraph.BuildConfigNodeKind) {
		for _, istNode := range g.SuccessorNodesByEdgeKind(bcNode, buildedges.BuildOutputEdgeKind) {
			for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(istNode, imageedges.ReferencedImageStreamGraphEdgeKind) {
				imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode)
				if len(imageStreamNode.Status.DockerImageRepository) == 0 {
					return true
				}
			}
		}
	}

	return false
}
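A hypothetical caller for the boolean above, showing how it might gate a one-line hint in status output; the writeRegistryHint name, the hint text, and the writer parameter are illustrative, not part of the original code.

// writeRegistryHint prints a registry hint only when some build output cannot be resolved.
// Illustrative sketch only.
func writeRegistryHint(out io.Writer, g osgraph.Graph) {
	if hasUnresolvedImageStreamTag(g) {
		fmt.Fprintln(out, "Builds push to image streams, but the integrated registry is not configured; see 'oc adm registry -h'.")
	}
}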
// AllReplicationControllers returns all the ReplicationControllers that aren't in the excludes set and the set of covered NodeIDs
func AllReplicationControllers(g osgraph.Graph, excludeNodeIDs IntSet) ([]ReplicationController, IntSet) {
	covered := IntSet{}
	rcViews := []ReplicationController{}

	for _, uncastNode := range g.NodesByKind(kubegraph.ReplicationControllerNodeKind) {
		if excludeNodeIDs.Has(uncastNode.ID()) {
			continue
		}

		rcView, covers := NewReplicationController(g, uncastNode.(*kubegraph.ReplicationControllerNode))
		covered.Insert(covers.List()...)
		rcViews = append(rcViews, rcView)
	}

	return rcViews, covered
}
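IntSet is not defined in these snippets; the sketch below captures only the operations the view helpers in this section rely on (Has, Insert, List), assuming node IDs are plain ints. The original type may be richer.

// IntSet is a small set of graph node IDs. Illustrative sketch only.
type IntSet map[int]struct{}

// Has reports whether the set contains id.
func (s IntSet) Has(id int) bool {
	_, ok := s[id]
	return ok
}

// Insert adds the given ids to the set.
func (s IntSet) Insert(ids ...int) {
	for _, id := range ids {
		s[id] = struct{}{}
	}
}

// List returns the ids in sorted order.
func (s IntSet) List() []int {
	list := make([]int, 0, len(s))
	for id := range s {
		list = append(list, id)
	}
	sort.Ints(list)
	return list
}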
// AllPetSets returns all the PetSets that aren't in the excludes set and the set of covered NodeIDs
func AllPetSets(g osgraph.Graph, excludeNodeIDs IntSet) ([]PetSet, IntSet) {
	covered := IntSet{}
	views := []PetSet{}

	for _, uncastNode := range g.NodesByKind(kubegraph.PetSetNodeKind) {
		if excludeNodeIDs.Has(uncastNode.ID()) {
			continue
		}

		view, covers := NewPetSet(g, uncastNode.(*kubegraph.PetSetNode))
		covered.Insert(covers.List()...)
		views = append(views, view)
	}

	return views, covered
}
// AllPods returns all the Pods that aren't in the excludes set and the set of covered NodeIDs
func AllPods(g osgraph.Graph, excludeNodeIDs IntSet) ([]Pod, IntSet) {
	covered := IntSet{}
	pods := []Pod{}

	for _, uncastNode := range g.NodesByKind(kubegraph.PodNodeKind) {
		if excludeNodeIDs.Has(uncastNode.ID()) {
			continue
		}

		pod, covers := NewPod(g, uncastNode.(*kubegraph.PodNode))
		covered.Insert(covers.List()...)
		pods = append(pods, pod)
	}

	return pods, covered
}
// AllServiceGroups returns all the ServiceGroups that aren't in the excludes set and the set of covered NodeIDs
func AllServiceGroups(g osgraph.Graph, excludeNodeIDs IntSet) ([]ServiceGroup, IntSet) {
	covered := IntSet{}
	services := []ServiceGroup{}

	for _, uncastNode := range g.NodesByKind(kubegraph.ServiceNodeKind) {
		if excludeNodeIDs.Has(uncastNode.ID()) {
			continue
		}

		service, covers := NewServiceGroup(g, uncastNode.(*kubegraph.ServiceNode))
		covered.Insert(covers.List()...)
		services = append(services, service)
	}

	sort.Sort(ServiceGroupByObjectMeta(services))
	return services, covered
}
// FindMissingInputImageStreams checks all build configs and confirms that their From element exists
//
// Precedence of failures:
// 1. A build config's input points to an image stream that does not exist
// 2. A build config's input uses an image stream tag reference in an existing image stream, but no images within the image stream have that tag assigned
// 3. A build config's input uses an image stream image reference in an existing image stream, but no images within the image stream have the supplied image hexadecimal ID
func FindMissingInputImageStreams(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, bcNode := range g.NodesByKind(buildgraph.BuildConfigNodeKind) {
		for _, bcInputNode := range g.PredecessorNodesByEdgeKind(bcNode, buildedges.BuildInputImageEdgeKind) {
			switch bcInputNode.(type) {
			case *imagegraph.ImageStreamTagNode:
				for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(bcInputNode, imageedges.ReferencedImageStreamGraphEdgeKind) {
					imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode)

					// note, BuildConfig.Spec.BuildSpec.Strategy.[Docker|Source|Custom]Strategy.From Input of ImageStream has been converted to ImageStreamTag on the vX to api conversion
					// prior to our reaching this point in the code; so there is no need to check for that type vs. ImageStreamTag or ImageStreamImage;

					tagNode, _ := bcInputNode.(*imagegraph.ImageStreamTagNode)
					imageStream := imageStreamNode.Object().(*imageapi.ImageStream)
					if _, ok := imageStream.Status.Tags[tagNode.ImageTag()]; !ok {
						markers = append(markers, getImageStreamTagMarker(g, f, bcInputNode, imageStreamNode, tagNode, bcNode))
					}
				}

			case *imagegraph.ImageStreamImageNode:
				for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(bcInputNode, imageedges.ReferencedImageStreamImageGraphEdgeKind) {
					imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode)

					imageNode, _ := bcInputNode.(*imagegraph.ImageStreamImageNode)
					imageStream := imageStreamNode.Object().(*imageapi.ImageStream)
					found, imageID := validImageStreamImage(imageNode, imageStream)
					if !found {
						markers = append(markers, getImageStreamImageMarker(g, f, bcNode, bcInputNode, imageStreamNode, imageNode, imageStream, imageID))
					}
				}
			}
		}
	}

	return markers
}
// AllDeploymentConfigPipelines returns all the DCPipelines that aren't in the excludes set and the set of covered NodeIDs
func AllDeploymentConfigPipelines(g osgraph.Graph, excludeNodeIDs IntSet) ([]DeploymentConfigPipeline, IntSet) {
	covered := IntSet{}
	dcPipelines := []DeploymentConfigPipeline{}

	for _, uncastNode := range g.NodesByKind(deploygraph.DeploymentConfigNodeKind) {
		if excludeNodeIDs.Has(uncastNode.ID()) {
			continue
		}

		pipeline, covers := NewDeploymentConfigPipeline(g, uncastNode.(*deploygraph.DeploymentConfigNode))
		covered.Insert(covers.List()...)
		dcPipelines = append(dcPipelines, pipeline)
	}

	sort.Sort(SortedDeploymentConfigPipeline(dcPipelines))
	return dcPipelines, covered
}
// AllImagePipelinesFromBuildConfig returns all the ImagePipelines derived from build configs that
// aren't in the excludes set and the set of covered NodeIDs
func AllImagePipelinesFromBuildConfig(g osgraph.Graph, excludeNodeIDs IntSet) ([]ImagePipeline, IntSet) {
	covered := IntSet{}
	pipelines := []ImagePipeline{}

	for _, uncastNode := range g.NodesByKind(buildgraph.BuildConfigNodeKind) {
		if excludeNodeIDs.Has(uncastNode.ID()) {
			continue
		}

		pipeline, covers := NewImagePipelineFromBuildConfigNode(g, uncastNode.(*buildgraph.BuildConfigNode))
		covered.Insert(covers.List()...)
		pipelines = append(pipelines, pipeline)
	}

	sort.Sort(SortedImagePipelines(pipelines))
	return pipelines, covered
}
// FindUnpushableBuildConfigs checks all build configs that will output to an IST backed by an ImageStream and checks to make sure their builds can push.
func FindUnpushableBuildConfigs(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	// note, unlike with Inputs, ImageStreamImage is not a valid type for build output
bc:
	for _, bcNode := range g.NodesByKind(buildgraph.BuildConfigNodeKind) {
		for _, istNode := range g.SuccessorNodesByEdgeKind(bcNode, buildedges.BuildOutputEdgeKind) {
			for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(istNode, imageedges.ReferencedImageStreamGraphEdgeKind) {
				imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode)

				if !imageStreamNode.IsFound {
					markers = append(markers, osgraph.Marker{
						Node:         bcNode,
						RelatedNodes: []graph.Node{istNode},

						Severity: osgraph.ErrorSeverity,
						Key:      MissingOutputImageStreamErr,
						Message: fmt.Sprintf("%s is pushing to %s, but the image stream for that tag does not exist.",
							f.ResourceName(bcNode), f.ResourceName(istNode)),
					})

					continue
				}

				if len(imageStreamNode.Status.DockerImageRepository) == 0 {
					markers = append(markers, osgraph.Marker{
						Node:         bcNode,
						RelatedNodes: []graph.Node{istNode},

						Severity: osgraph.ErrorSeverity,
						Key:      MissingRequiredRegistryErr,
						Message: fmt.Sprintf("%s is pushing to %s, but the administrator has not configured the integrated Docker registry.",
							f.ResourceName(bcNode), f.ResourceName(istNode)),
						Suggestion: osgraph.Suggestion("oc adm registry -h"),
					})

					continue bc
				}
			}
		}
	}

	return markers
}
// FindMissingTLSTerminationType finds routes that specify a TLS configuration without a termination type.
func FindMissingTLSTerminationType(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastRouteNode := range g.NodesByKind(routegraph.RouteNodeKind) {
		routeNode := uncastRouteNode.(*routegraph.RouteNode)

		if routeNode.Spec.TLS != nil && len(routeNode.Spec.TLS.Termination) == 0 {
			markers = append(markers, osgraph.Marker{
				Node: routeNode,

				Severity:   osgraph.ErrorSeverity,
				Key:        MissingTLSTerminationTypeErr,
				Message:    fmt.Sprintf("%s has a TLS configuration but no termination type specified.", f.ResourceName(routeNode)),
				Suggestion: osgraph.Suggestion(fmt.Sprintf("oc patch %s -p '{\"spec\":{\"tls\":{\"termination\":\"<type>\"}}}' (replace <type> with a valid termination type: edge, passthrough, reencrypt)", f.ResourceName(routeNode))),
			})
		}
	}

	return markers
}
// FindHPASpecsMissingCPUTargets scans the graph in search of HorizontalPodAutoscalers that are missing a CPU utilization target.
// As of right now, the only metric that HPAs can use to scale pods is the CPU utilization, so if an HPA is missing this target it
// is effectively useless.
func FindHPASpecsMissingCPUTargets(graph osgraph.Graph, namer osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastNode := range graph.NodesByKind(kubenodes.HorizontalPodAutoscalerNodeKind) {
		node := uncastNode.(*kubenodes.HorizontalPodAutoscalerNode)

		if node.HorizontalPodAutoscaler.Spec.CPUUtilization == nil {
			markers = append(markers, osgraph.Marker{
				Node:       node,
				Severity:   osgraph.ErrorSeverity,
				Key:        HPAMissingCPUTargetError,
				Message:    fmt.Sprintf("%s is missing a CPU utilization target", namer.ResourceName(node)),
				Suggestion: osgraph.Suggestion(fmt.Sprintf(`oc patch %s -p '{"spec":{"cpuUtilization":{"targetPercentage": 80}}}'`, namer.ResourceName(node))),
			})
		}
	}

	return markers
}
// AllImagePipelinesFromBuildConfig returns all the ImagePipelines derived from build configs that
// aren't in the excludes set and the set of covered NodeIDs, and records, for each pipeline, which
// build configs produce its base image.
func AllImagePipelinesFromBuildConfig(g osgraph.Graph, excludeNodeIDs IntSet) ([]ImagePipeline, IntSet) {
	covered := IntSet{}
	pipelines := []ImagePipeline{}

	for _, uncastNode := range g.NodesByKind(buildgraph.BuildConfigNodeKind) {
		if excludeNodeIDs.Has(uncastNode.ID()) {
			continue
		}

		pipeline, covers := NewImagePipelineFromBuildConfigNode(g, uncastNode.(*buildgraph.BuildConfigNode))
		covered.Insert(covers.List()...)
		pipelines = append(pipelines, pipeline)
	}

	sort.Sort(SortedImagePipelines(pipelines))

	outputImageToBCMap := make(map[string][]string)
	for _, pipeline := range pipelines {
		// note, bc does not have to have an output image
		if pipeline.Image != nil {
			bcs, ok := outputImageToBCMap[pipeline.Image.ImageSpec()]
			if !ok {
				bcs = []string{}
			}
			bcs = append(bcs, pipeline.Build.BuildConfig.Name)
			outputImageToBCMap[pipeline.Image.ImageSpec()] = bcs
		}
	}

	if len(outputImageToBCMap) > 0 {
		for i, pipeline := range pipelines {
			// note, bc does not have to have an input strategy image
			if pipeline.BaseImage != nil {
				baseBCs, ok := outputImageToBCMap[pipeline.BaseImage.ImageSpec()]
				if ok && len(baseBCs) > 0 {
					pipelines[i].BaseBuilds = baseBCs
				}
			}
		}
	}

	return pipelines, covered
}
// describeBadPodSpecs summarizes pod specs that reference unmountable or missing secrets and
// returns the warning lines along with all missing secret nodes.
func describeBadPodSpecs(out io.Writer, g osgraph.Graph) ([]string, []*kubegraph.SecretNode) {
	allMissingSecrets := []*kubegraph.SecretNode{}
	lines := []string{}

	for _, uncastPodSpec := range g.NodesByKind(kubegraph.PodSpecNodeKind) {
		podSpecNode := uncastPodSpec.(*kubegraph.PodSpecNode)
		unmountableSecrets, missingSecrets := kubeanalysis.CheckMountedSecrets(g, podSpecNode)
		containingNode := osgraph.GetTopLevelContainerNode(g, podSpecNode)

		allMissingSecrets = append(allMissingSecrets, missingSecrets...)

		unmountableNames := []string{}
		for _, secret := range unmountableSecrets {
			unmountableNames = append(unmountableNames, secret.ResourceString())
		}

		missingNames := []string{}
		for _, secret := range missingSecrets {
			missingNames = append(missingNames, secret.ResourceString())
		}

		containingNodeName := g.GraphDescriber.Name(containingNode)
		if resourceNode, ok := containingNode.(osgraph.ResourceNode); ok {
			containingNodeName = resourceNode.ResourceString()
		}

		switch {
		case len(unmountableSecrets) > 0 && len(missingSecrets) > 0:
			lines = append(lines, fmt.Sprintf("\t%s is not allowed to mount %s and wants to mount these missing secrets %s", containingNodeName, strings.Join(unmountableNames, ","), strings.Join(missingNames, ",")))
		case len(unmountableSecrets) > 0:
			lines = append(lines, fmt.Sprintf("\t%s is not allowed to mount %s", containingNodeName, strings.Join(unmountableNames, ",")))
		case len(missingSecrets) > 0:
			lines = append(lines, fmt.Sprintf("\t%s wants to mount these missing secrets %s", containingNodeName, strings.Join(missingNames, ",")))
		}
	}

	// if we had any failures, prepend the warning line
	if len(lines) > 0 {
		return append([]string{"Warning: some requested secrets are not allowed:"}, lines...), allMissingSecrets
	}

	return []string{}, allMissingSecrets
}
// FindPathBasedPassthroughRoutes finds routes that combine a path with passthrough TLS termination,
// which is an invalid combination.
func FindPathBasedPassthroughRoutes(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastRouteNode := range g.NodesByKind(routegraph.RouteNodeKind) {
		routeNode := uncastRouteNode.(*routegraph.RouteNode)

		if len(routeNode.Spec.Path) > 0 && routeNode.Spec.TLS != nil && routeNode.Spec.TLS.Termination == routeapi.TLSTerminationPassthrough {
			markers = append(markers, osgraph.Marker{
				Node: routeNode,

				Severity:   osgraph.ErrorSeverity,
				Key:        PathBasedPassthroughErr,
				Message:    fmt.Sprintf("%s is path-based and uses passthrough termination, which is an invalid combination.", f.ResourceName(routeNode)),
				Suggestion: osgraph.Suggestion("1. use spec.tls.termination=edge or 2. use spec.tls.termination=reencrypt and specify spec.tls.destinationCACertificate or 3. remove spec.path"),
			})
		}
	}

	return markers
}
// FindMissingRouter creates markers for all routes in case there is no running router.
func FindMissingRouter(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastRouteNode := range g.NodesByKind(routegraph.RouteNodeKind) {
		routeNode := uncastRouteNode.(*routegraph.RouteNode)

		if len(routeNode.Route.Status.Ingress) == 0 {
			markers = append(markers, osgraph.Marker{
				Node: routeNode,

				Severity: osgraph.ErrorSeverity,
				Key:      MissingRequiredRouterErr,
				Message: fmt.Sprintf("%s is routing traffic to svc/%s, but either the administrator has not installed a router or the router is not selecting this route.",
					f.ResourceName(routeNode), routeNode.Spec.To.Name),
				Suggestion: osgraph.Suggestion("oc adm router -h"),
			})
		}
	}

	return markers
}
// FindMissingPortMapping checks all routes and reports those that don't specify a port while the
// service they route to exposes multiple ports. Routes whose target service doesn't exist are also reported.
func FindMissingPortMapping(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

route:
	for _, uncastRouteNode := range g.NodesByKind(routegraph.RouteNodeKind) {
		for _, uncastServiceNode := range g.SuccessorNodesByEdgeKind(uncastRouteNode, routeedges.ExposedThroughRouteEdgeKind) {
			routeNode := uncastRouteNode.(*routegraph.RouteNode)
			svcNode := uncastServiceNode.(*kubegraph.ServiceNode)

			if !svcNode.Found() {
				markers = append(markers, osgraph.Marker{
					Node:         routeNode,
					RelatedNodes: []graph.Node{svcNode},

					Severity: osgraph.WarningSeverity,
					Key:      MissingServiceWarning,
					Message: fmt.Sprintf("%s is supposed to route traffic to %s but %s doesn't exist.",
						f.ResourceName(routeNode), f.ResourceName(svcNode), f.ResourceName(svcNode)),
				})

				continue route
			}

			if len(svcNode.Spec.Ports) > 1 && (routeNode.Spec.Port == nil || len(routeNode.Spec.Port.TargetPort.String()) == 0) {
				markers = append(markers, osgraph.Marker{
					Node:         routeNode,
					RelatedNodes: []graph.Node{svcNode},

					Severity: osgraph.WarningSeverity,
					Key:      MissingRoutePortWarning,
					Message: fmt.Sprintf("%s doesn't have a port specified and is routing traffic to %s which uses multiple ports.",
						f.ResourceName(routeNode), f.ResourceName(svcNode)),
				})

				continue route
			}
		}
	}

	return markers
}
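The marker checks above are free functions with compatible shapes; the sketch below shows how a caller might aggregate several of them into one analysis pass. The allMarkers name and the particular selection of checks are illustrative, not part of the original code.

// allMarkers runs a set of marker checks against the graph and concatenates their results.
// Illustrative sketch only.
func allMarkers(g osgraph.Graph, f osgraph.Namer, setProbeCommand string) []osgraph.Marker {
	checks := []func() []osgraph.Marker{
		func() []osgraph.Marker { return FindRestartingPods(g, f) },
		func() []osgraph.Marker { return FindMissingLivenessProbes(g, f, setProbeCommand) },
		func() []osgraph.Marker { return FindMissingTLSTerminationType(g, f) },
		func() []osgraph.Marker { return FindMissingRouter(g, f) },
		func() []osgraph.Marker { return FindMissingPortMapping(g, f) },
	}

	markers := []osgraph.Marker{}
	for _, check := range checks {
		markers = append(markers, check()...)
	}
	return markers
}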