// NewImagePipeline attempts to locate a build flow from the provided node. If no such // build flow can be located, false is returned. func NewImagePipelineFromBuildConfigNode(g osgraph.Graph, bcNode *buildgraph.BuildConfigNode) (ImagePipeline, IntSet) { covered := IntSet{} covered.Insert(bcNode.ID()) flow := ImagePipeline{} base, src, coveredInputs, _ := findBuildInputs(g, bcNode) covered.Insert(coveredInputs.List()...) flow.BaseImage = base flow.Source = src flow.Build = bcNode flow.LastSuccessfulBuild, flow.LastUnsuccessfulBuild, flow.ActiveBuilds = buildedges.RelevantBuilds(g, flow.Build) // we should have at most one for _, buildOutputNode := range g.SuccessorNodesByEdgeKind(bcNode, buildedges.BuildOutputEdgeKind) { // this will handle the imagestream tag case for _, input := range g.SuccessorNodesByEdgeKind(buildOutputNode, imageedges.ReferencedImageStreamGraphEdgeKind) { imageStreamNode := input.(*imagegraph.ImageStreamNode) flow.DestinationResolved = (len(imageStreamNode.Status.DockerImageRepository) != 0) } // TODO handle the DockerImage case } return flow, covered }
// FindHPASpecsMissingScaleRefs finds all Horizontal Pod Autoscalers whose scale reference points to an object that doesn't exist // or that the client does not have the permission to see. func FindHPASpecsMissingScaleRefs(graph osgraph.Graph, namer osgraph.Namer) []osgraph.Marker { markers := []osgraph.Marker{} for _, uncastNode := range graph.NodesByKind(kubenodes.HorizontalPodAutoscalerNodeKind) { node := uncastNode.(*kubenodes.HorizontalPodAutoscalerNode) scaledObjects := graph.SuccessorNodesByEdgeKind( uncastNode, kubegraph.ScalingEdgeKind, ) if len(scaledObjects) < 1 { markers = append(markers, createMissingScaleRefMarker(node, nil, namer)) continue } for _, scaleRef := range scaledObjects { if existenceChecker, ok := scaleRef.(osgraph.ExistenceChecker); ok && !existenceChecker.Found() { // if this node is synthetic, we can't be sure that the HPA is scaling something that actually exists markers = append(markers, createMissingScaleRefMarker(node, scaleRef, namer)) } } } return markers }
// BuildConfigsForTag returns the buildConfig that points to the provided imageStreamTag. func BuildConfigsForTag(g osgraph.Graph, istag graph.Node) []*buildgraph.BuildConfigNode { bcs := []*buildgraph.BuildConfigNode{} for _, bcNode := range g.PredecessorNodesByEdgeKind(istag, BuildOutputEdgeKind) { bcs = append(bcs, bcNode.(*buildgraph.BuildConfigNode)) } return bcs }
func addImageStreamsToGraph(g graph.Graph, streams *imageapi.ImageStreamList) { for i := range streams.Items { stream := &streams.Items[i] glog.V(4).Infof("Adding ImageStream %s/%s to graph", stream.Namespace, stream.Name) isNode := imagegraph.EnsureImageStreamNode(g, stream) imageStreamNode := isNode.(*imagegraph.ImageStreamNode) // connect IS with underlying images for tag, history := range stream.Status.Tags { for i := range history.Items { image := history.Items[i] n := imagegraph.FindImage(g, image.Image) if n == nil { glog.V(2).Infof("Unable to find image %q in graph (from tag=%q, dockerImageReference=%s)", history.Items[i].Image, tag, image.DockerImageReference) continue } imageNode := n.(*imagegraph.ImageNode) glog.V(4).Infof("Adding edge from %q to %q", imageStreamNode.UniqueName(), imageNode.UniqueName()) edgeKind := ImageStreamImageEdgeKind if i > 1 { edgeKind = HistoricImageStreamImageEdgeKind } g.AddEdge(imageStreamNode, imageNode, edgeKind) } } } }
func getImageStreamSize(g graph.Graph, node *imagegraph.ImageStreamNode) (int64, int, int) { imageEdges := g.OutboundEdges(node, ImageStreamImageEdgeKind) storage := int64(0) images := len(imageEdges) layers := 0 blobSet := sets.NewString() for _, e := range imageEdges { imageNode, ok := e.To().(*imagegraph.ImageNode) if !ok { continue } image := imageNode.Image layers += len(image.DockerImageLayers) // we're counting only unique layers per the entire stream for _, layer := range image.DockerImageLayers { if blobSet.Has(layer.Name) { continue } blobSet.Insert(layer.Name) storage += layer.LayerSize } if len(image.DockerImageConfig) > 0 && !blobSet.Has(image.DockerImageMetadata.ID) { blobSet.Insert(image.DockerImageMetadata.ID) storage += int64(len(image.DockerImageConfig)) } } return storage, images, layers }
func addPodSpecToGraph(g graph.Graph, spec *kapi.PodSpec, predecessor gonum.Node) { for j := range spec.Containers { container := spec.Containers[j] glog.V(4).Infof("Examining container image %q", container.Image) ref, err := imageapi.ParseDockerImageReference(container.Image) if err != nil { glog.V(2).Infof("Unable to parse DockerImageReference %q: %v - skipping", container.Image, err) continue } if len(ref.ID) == 0 { // ignore not managed images continue } imageNode := imagegraph.FindImage(g, ref.ID) if imageNode == nil { glog.V(1).Infof("Unable to find image %q in the graph", ref.ID) continue } glog.V(4).Infof("Adding edge from %v to %v", predecessor, imageNode) g.AddEdge(predecessor, imageNode, PodImageEdgeKind) } }
func addImagesToGraph(g graph.Graph, images *imageapi.ImageList) { for i := range images.Items { image := &images.Items[i] glog.V(4).Infof("Adding image %q to graph", image.Name) imageNode := imagegraph.EnsureImageNode(g, image) topLayerAdded := false // We're looking through layers in reversed order since we need to // find first layer (from top) which is not an empty layer, we're omitting // empty layers because every image has those and they're giving us // false positives about parents. This applies only to schema v1 images // schema v2 does not have that problem. for i := len(image.DockerImageLayers) - 1; i >= 0; i-- { layer := image.DockerImageLayers[i] layerNode := imagegraph.EnsureImageLayerNode(g, layer.Name) edgeKind := ImageLayerEdgeKind if !topLayerAdded && layer.Name != digest.DigestSha256EmptyTar { edgeKind = ImageTopLayerEdgeKind topLayerAdded = true } g.AddEdge(imageNode, layerNode, edgeKind) glog.V(4).Infof("Adding image layer %q to graph (%q)", layer.Name, edgeKind) } } }
// FindUnmountableSecrets inspects all PodSpecs for any Secret reference that isn't listed as mountable by the referenced ServiceAccount func FindUnmountableSecrets(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { markers := []osgraph.Marker{} for _, uncastPodSpecNode := range g.NodesByKind(kubegraph.PodSpecNodeKind) { podSpecNode := uncastPodSpecNode.(*kubegraph.PodSpecNode) unmountableSecrets := CheckForUnmountableSecrets(g, podSpecNode) topLevelNode := osgraph.GetTopLevelContainerNode(g, podSpecNode) topLevelString := f.ResourceName(topLevelNode) saString := "MISSING_SA" saNodes := g.SuccessorNodesByEdgeKind(podSpecNode, kubeedges.ReferencedServiceAccountEdgeKind) if len(saNodes) > 0 { saString = f.ResourceName(saNodes[0]) } for _, unmountableSecret := range unmountableSecrets { markers = append(markers, osgraph.Marker{ Node: podSpecNode, RelatedNodes: []graph.Node{unmountableSecret}, Severity: osgraph.WarningSeverity, Key: UnmountableSecretWarning, Message: fmt.Sprintf("%s is attempting to mount a secret %s disallowed by %s", topLevelString, f.ResourceName(unmountableSecret), saString), }) } } return markers }
// FindMissingLivenessProbes inspects all PodSpecs for missing liveness probes and generates a list of non-duplicate markers func FindMissingLivenessProbes(g osgraph.Graph, f osgraph.Namer, setProbeCommand string) []osgraph.Marker { markers := []osgraph.Marker{} for _, uncastPodSpecNode := range g.NodesByKind(kubegraph.PodSpecNodeKind) { podSpecNode := uncastPodSpecNode.(*kubegraph.PodSpecNode) if hasLivenessProbe(podSpecNode) { continue } topLevelNode := osgraph.GetTopLevelContainerNode(g, podSpecNode) // skip any podSpec nodes that are managed by other nodes. // Liveness probes should only be applied to a controlling // podSpec node, and not to any of its children. if hasControllerRefEdge(g, topLevelNode) { continue } topLevelString := f.ResourceName(topLevelNode) markers = append(markers, osgraph.Marker{ Node: podSpecNode, RelatedNodes: []graph.Node{topLevelNode}, Severity: osgraph.InfoSeverity, Key: MissingLivenessProbeWarning, Message: fmt.Sprintf("%s has no liveness probe to verify pods are still running.", topLevelString), Suggestion: osgraph.Suggestion(fmt.Sprintf("%s %s --liveness ...", setProbeCommand, topLevelString)), }) } return markers }
// NewReplicationController returns the ReplicationController and a set of all the NodeIDs covered by the ReplicationController func NewReplicationController(g osgraph.Graph, rcNode *kubegraph.ReplicationControllerNode) (ReplicationController, IntSet) { covered := IntSet{} covered.Insert(rcNode.ID()) rcView := ReplicationController{} rcView.RC = rcNode for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByRCEdgeKind) { podNode := uncastPodNode.(*kubegraph.PodNode) covered.Insert(podNode.ID()) rcView.OwnedPods = append(rcView.OwnedPods, podNode) // check to see if this pod is managed by more than one RC uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByRCEdgeKind) if len(uncastOwningRCs) > 1 { for _, uncastOwningRC := range uncastOwningRCs { if uncastOwningRC.ID() == rcNode.ID() { continue } conflictingRC := uncastOwningRC.(*kubegraph.ReplicationControllerNode) rcView.ConflictingRCs = append(rcView.ConflictingRCs, conflictingRC) conflictingPods, ok := rcView.ConflictingRCIDToPods[conflictingRC.ID()] if !ok { conflictingPods = []*kubegraph.PodNode{} } conflictingPods = append(conflictingPods, podNode) rcView.ConflictingRCIDToPods[conflictingRC.ID()] = conflictingPods } } } return rcView, covered }
// addImagesToGraph adds all images to the graph that belong to one of the // registries in the algorithm and are at least as old as the minimum age // threshold as specified by the algorithm. It also adds all the images' layers // to the graph. func addImagesToGraph(g graph.Graph, images *imageapi.ImageList, algorithm pruneAlgorithm) { for i := range images.Items { image := &images.Items[i] glog.V(4).Infof("Examining image %q", image.Name) if image.Annotations == nil { glog.V(4).Infof("Image %q with DockerImageReference %q belongs to an external registry - skipping", image.Name, image.DockerImageReference) continue } if value, ok := image.Annotations[imageapi.ManagedByOpenShiftAnnotation]; !ok || value != "true" { glog.V(4).Infof("Image %q with DockerImageReference %q belongs to an external registry - skipping", image.Name, image.DockerImageReference) continue } age := unversioned.Now().Sub(image.CreationTimestamp.Time) if !algorithm.pruneOverSizeLimit && age < algorithm.keepYoungerThan { glog.V(4).Infof("Image %q is younger than minimum pruning age, skipping (age=%v)", image.Name, age) continue } glog.V(4).Infof("Adding image %q to graph", image.Name) imageNode := imagegraph.EnsureImageNode(g, image) for _, layer := range image.DockerImageLayers { glog.V(4).Infof("Adding image layer %q to graph", layer.Name) layerNode := imagegraph.EnsureImageLayerNode(g, layer.Name) g.AddEdge(imageNode, layerNode, ReferencedImageLayerEdgeKind) } } }
// AddAllVolumeClaimEdges adds volume-claim edges for every DeploymentConfig
// node present in the graph.
func AddAllVolumeClaimEdges(g osgraph.Graph) {
	for _, uncast := range g.Nodes() {
		dcNode, ok := uncast.(*deploygraph.DeploymentConfigNode)
		if !ok {
			continue
		}
		AddVolumeClaimEdges(g, dcNode)
	}
}
// NewDeploymentConfigPipeline returns the DeploymentConfigPipeline and a set of all the NodeIDs covered by the DeploymentConfigPipeline func NewDeploymentConfigPipeline(g osgraph.Graph, dcNode *deploygraph.DeploymentConfigNode) (DeploymentConfigPipeline, IntSet) { covered := IntSet{} covered.Insert(dcNode.ID()) dcPipeline := DeploymentConfigPipeline{} dcPipeline.Deployment = dcNode // for everything that can trigger a deployment, create an image pipeline and add it to the list for _, istNode := range g.PredecessorNodesByEdgeKind(dcNode, deployedges.TriggersDeploymentEdgeKind) { imagePipeline, covers := NewImagePipelineFromImageTagLocation(g, istNode, istNode.(ImageTagLocation)) covered.Insert(covers.List()...) dcPipeline.Images = append(dcPipeline.Images, imagePipeline) } // for image that we use, create an image pipeline and add it to the list for _, tagNode := range g.PredecessorNodesByEdgeKind(dcNode, deployedges.UsedInDeploymentEdgeKind) { imagePipeline, covers := NewImagePipelineFromImageTagLocation(g, tagNode, tagNode.(ImageTagLocation)) covered.Insert(covers.List()...) dcPipeline.Images = append(dcPipeline.Images, imagePipeline) } dcPipeline.ActiveDeployment, dcPipeline.InactiveDeployments = deployedges.RelevantDeployments(g, dcNode) return dcPipeline, covered }
// NewImagePipelineFromImageTagLocation returns the ImagePipeline and all the nodes contributing to it func NewImagePipelineFromImageTagLocation(g osgraph.Graph, node graph.Node, imageTagLocation ImageTagLocation) (ImagePipeline, IntSet) { covered := IntSet{} covered.Insert(node.ID()) flow := ImagePipeline{} flow.Image = imageTagLocation for _, input := range g.PredecessorNodesByEdgeKind(node, buildedges.BuildOutputEdgeKind) { covered.Insert(input.ID()) build := input.(*buildgraph.BuildConfigNode) if flow.Build != nil { // report this as an error (unexpected duplicate input build) } if build.BuildConfig == nil { // report this as as a missing build / broken link break } base, src, coveredInputs, _ := findBuildInputs(g, build) covered.Insert(coveredInputs.List()...) flow.Build = build flow.BaseImage = base flow.Source = src } return flow, covered }
// AddAllMountedSecretEdges adds mounted-secret edges for every PodSpec node in
// the graph.
func AddAllMountedSecretEdges(g osgraph.Graph) {
	for _, uncast := range g.Nodes() {
		podSpecNode, ok := uncast.(*kubegraph.PodSpecNode)
		if !ok {
			continue
		}
		AddMountedSecretEdges(g, podSpecNode)
	}
}
func AddMountedSecretEdges(g osgraph.Graph, podSpec *kubegraph.PodSpecNode) { //pod specs are always contained. We'll get the toplevel container so that we can pull a namespace from it containerNode := osgraph.GetTopLevelContainerNode(g, podSpec) containerObj := g.GraphDescriber.Object(containerNode) meta, err := kapi.ObjectMetaFor(containerObj.(runtime.Object)) if err != nil { // this should never happen. it means that a podSpec is owned by a top level container that is not a runtime.Object panic(err) } for _, volume := range podSpec.Volumes { source := volume.VolumeSource if source.Secret == nil { continue } // pod secrets must be in the same namespace syntheticSecret := &kapi.Secret{} syntheticSecret.Namespace = meta.Namespace syntheticSecret.Name = source.Secret.SecretName secretNode := kubegraph.FindOrCreateSyntheticSecretNode(g, syntheticSecret) g.AddEdge(podSpec, secretNode, MountedSecretEdgeKind) } }
// Edges are added to the graph from each predecessor (pod or replication // controller) to the images specified by the pod spec's list of containers, as // long as the image is managed by OpenShift. func addPodSpecToGraph(g graph.Graph, spec *kapi.PodSpec, predecessor gonum.Node) { for j := range spec.Containers { container := spec.Containers[j] glog.V(4).Infof("Examining container image %q", container.Image) ref, err := imageapi.ParseDockerImageReference(container.Image) if err != nil { util.HandleError(fmt.Errorf("unable to parse DockerImageReference %q: %v", container.Image, err)) continue } if len(ref.ID) == 0 { glog.V(4).Infof("%q has no image ID", container.Image) continue } imageNode := imagegraph.FindImage(g, ref.ID) if imageNode == nil { glog.Infof("Unable to find image %q in the graph", ref.ID) continue } glog.V(4).Infof("Adding edge from pod to image") g.AddEdge(predecessor, imageNode, ReferencedImageEdgeKind) } }
// FindRestartingPods inspects all Pods to see if they've restarted more than the threshold func FindRestartingPods(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { markers := []osgraph.Marker{} for _, uncastPodNode := range g.NodesByKind(kubegraph.PodNodeKind) { podNode := uncastPodNode.(*kubegraph.PodNode) pod, ok := podNode.Object().(*kapi.Pod) if !ok { continue } for _, containerStatus := range pod.Status.ContainerStatuses { if containerStatus.RestartCount >= RestartThreshold { markers = append(markers, osgraph.Marker{ Node: podNode, Severity: osgraph.WarningSeverity, Key: RestartingPodWarning, Message: fmt.Sprintf("container %q in %s has restarted %d times", containerStatus.Name, f.ResourceName(podNode), containerStatus.RestartCount), }) } } } return markers }
// NewServiceGroup returns the ServiceGroup and a set of all the NodeIDs covered by the service service func NewServiceGroup(g osgraph.Graph, serviceNode *kubegraph.ServiceNode) (ServiceGroup, IntSet) { covered := IntSet{} covered.Insert(serviceNode.ID()) service := ServiceGroup{} service.Service = serviceNode for _, uncastServiceFulfiller := range g.PredecessorNodesByEdgeKind(serviceNode, kubeedges.ExposedThroughServiceEdgeKind) { container := osgraph.GetTopLevelContainerNode(g, uncastServiceFulfiller) switch castContainer := container.(type) { case *deploygraph.DeploymentConfigNode: service.FulfillingDCs = append(service.FulfillingDCs, castContainer) case *kubegraph.ReplicationControllerNode: service.FulfillingRCs = append(service.FulfillingRCs, castContainer) case *kubegraph.PodNode: service.FulfillingPods = append(service.FulfillingPods, castContainer) default: util.HandleError(fmt.Errorf("unrecognized container: %v", castContainer)) } } // add the DCPipelines for all the DCs that fulfill the service for _, fulfillingDC := range service.FulfillingDCs { dcPipeline, dcCovers := NewDeploymentConfigPipeline(g, fulfillingDC) covered.Insert(dcCovers.List()...) service.DeploymentConfigPipelines = append(service.DeploymentConfigPipelines, dcPipeline) } return service, covered }
// FindDeploymentConfigReadinessWarnings inspects deploymentconfigs and reports those that // don't have readiness probes set up. func FindDeploymentConfigReadinessWarnings(g osgraph.Graph, f osgraph.Namer, setProbeCommand string) []osgraph.Marker { markers := []osgraph.Marker{} Node: for _, uncastDcNode := range g.NodesByKind(deploygraph.DeploymentConfigNodeKind) { dcNode := uncastDcNode.(*deploygraph.DeploymentConfigNode) if t := dcNode.DeploymentConfig.Spec.Template; t != nil && len(t.Spec.Containers) > 0 { for _, container := range t.Spec.Containers { if container.ReadinessProbe != nil { continue Node } } // All of the containers in the deployment config lack a readiness probe markers = append(markers, osgraph.Marker{ Node: uncastDcNode, Severity: osgraph.WarningSeverity, Key: MissingReadinessProbeWarning, Message: fmt.Sprintf("%s has no readiness probe to verify pods are ready to accept traffic or ensure deployment is successful.", f.ResourceName(dcNode)), Suggestion: osgraph.Suggestion(fmt.Sprintf("%s %s --readiness ...", setProbeCommand, f.ResourceName(dcNode))), }) continue Node } } return markers }
// partitionReverse narrows the graph down to the subgraph that can reach the
// given root: only BuildConfig and ImageStreamTag nodes connected by the
// provided build-input edge kinds (plus BuildOutput edges) are kept, and of
// those only the nodes with a path leading to root survive.
func partitionReverse(g osgraph.Graph, root graph.Node, buildInputEdgeKinds []string) osgraph.Graph {
	// Filter out all but BuildConfig and ImageStreamTag nodes
	nodeFn := osgraph.NodesOfKind(buildgraph.BuildConfigNodeKind, imagegraph.ImageStreamTagNodeKind)
	// Filter out all but the caller's build-input edge kinds and BuildOutput edges
	edgeKinds := []string{}
	edgeKinds = append(edgeKinds, buildInputEdgeKinds...)
	edgeKinds = append(edgeKinds, buildedges.BuildOutputEdgeKind)
	edgeFn := osgraph.EdgesOfKind(edgeKinds...)
	sub := g.Subgraph(nodeFn, edgeFn)

	// Filter out inbound edges to the IST of interest. Note: this re-subgraphs
	// the already-filtered graph, so both filters remain in effect.
	edgeFn = osgraph.RemoveOutboundEdges([]graph.Node{root})
	sub = sub.Subgraph(nodeFn, edgeFn)

	// Check all paths leading from the root node, collect any
	// node found in them, and create the desired subgraph.
	// DijkstraAllPaths precomputes shortest paths between every node pair;
	// a non-empty path from node to root means the node feeds into the root.
	desired := []graph.Node{root}
	paths := path.DijkstraAllPaths(sub)
	for _, node := range sub.Nodes() {
		if node == root {
			continue
		}
		path, _, _ := paths.Between(node, root)
		if len(path) != 0 {
			desired = append(desired, node)
		}
	}
	return sub.SubgraphWithNodes(desired, osgraph.ExistingDirectEdge)
}
// FindCircularBuilds checks all build configs for cycles func FindCircularBuilds(g osgraph.Graph) []osgraph.Marker { // Filter out all but ImageStreamTag and BuildConfig nodes nodeFn := osgraph.NodesOfKind(imagegraph.ImageStreamTagNodeKind, buildgraph.BuildConfigNodeKind) // Filter out all but BuildInputImage and BuildOutput edges edgeFn := osgraph.EdgesOfKind(buildedges.BuildInputImageEdgeKind, buildedges.BuildOutputEdgeKind) // Create desired subgraph sub := g.Subgraph(nodeFn, edgeFn) markers := []osgraph.Marker{} // Check for cycles for _, cycle := range topo.CyclesIn(sub) { nodeNames := []string{} for _, node := range cycle { if resourceStringer, ok := node.(osgraph.ResourceNode); ok { nodeNames = append(nodeNames, resourceStringer.ResourceString()) } } markers = append(markers, osgraph.Marker{ Node: cycle[0], RelatedNodes: cycle, Severity: osgraph.WarningSeverity, Key: CyclicBuildConfigWarning, Message: fmt.Sprintf("Cycle detected in build configurations: %s", strings.Join(nodeNames, " -> ")), }) } return markers }
func findBuildOutput(g osgraph.Graph, bcNode *buildgraph.BuildConfigNode) (result ImageTagLocation) { for _, output := range g.SuccessorNodesByEdgeKind(bcNode, buildedges.BuildOutputEdgeKind) { result = output.(ImageTagLocation) return } return }
// NewImagePipelineFromImageTagLocation returns the ImagePipeline and all the nodes contributing to it func NewImagePipelineFromImageTagLocation(g osgraph.Graph, node graph.Node, imageTagLocation ImageTagLocation) (ImagePipeline, IntSet) { covered := IntSet{} covered.Insert(node.ID()) flow := ImagePipeline{} flow.Image = imageTagLocation for _, input := range g.PredecessorNodesByEdgeKind(node, buildedges.BuildOutputEdgeKind) { covered.Insert(input.ID()) build := input.(*buildgraph.BuildConfigNode) if flow.Build != nil { // report this as an error (unexpected duplicate input build) } if build.BuildConfig == nil { // report this as as a missing build / broken link break } base, src, coveredInputs, _ := findBuildInputs(g, build) covered.Insert(coveredInputs.List()...) flow.BaseImage = base flow.Source = src flow.Build = build flow.LastSuccessfulBuild, flow.LastUnsuccessfulBuild, flow.ActiveBuilds = buildedges.RelevantBuilds(g, flow.Build) } for _, input := range g.SuccessorNodesByEdgeKind(node, imageedges.ReferencedImageStreamGraphEdgeKind) { covered.Insert(input.ID()) imageStreamNode := input.(*imagegraph.ImageStreamNode) flow.DestinationResolved = (len(imageStreamNode.Status.DockerImageRepository) != 0) } return flow, covered }
// FindMissingSecrets inspects all PodSpecs for any Secret reference that is a synthetic node (not a pre-existing node in the graph) func FindMissingSecrets(g osgraph.Graph) []osgraph.Marker { markers := []osgraph.Marker{} for _, uncastPodSpecNode := range g.NodesByKind(kubegraph.PodSpecNodeKind) { podSpecNode := uncastPodSpecNode.(*kubegraph.PodSpecNode) missingSecrets := CheckMissingMountedSecrets(g, podSpecNode) topLevelNode := osgraph.GetTopLevelContainerNode(g, podSpecNode) topLevelString := g.Name(topLevelNode) if resourceStringer, ok := topLevelNode.(osgraph.ResourceNode); ok { topLevelString = resourceStringer.ResourceString() } for _, missingSecret := range missingSecrets { markers = append(markers, osgraph.Marker{ Node: podSpecNode, RelatedNodes: []graph.Node{missingSecret}, Severity: osgraph.WarningSeverity, Key: UnmountableSecretWarning, Message: fmt.Sprintf("%s is attempting to mount a missing secret %s", topLevelString, missingSecret.ResourceString()), }) } } return markers }
// AddHPAScaleRefEdges adds a scaling edge from each HorizontalPodAutoscaler
// node to a synthetic node representing its scale target. Only scale targets
// that the REST mapper resolves to replicationcontrollers or deploymentconfigs
// are linked; anything else is skipped.
func AddHPAScaleRefEdges(g osgraph.Graph) {
	for _, node := range g.NodesByKind(kubegraph.HorizontalPodAutoscalerNodeKind) {
		hpaNode := node.(*kubegraph.HorizontalPodAutoscalerNode)

		// the scale target shares the HPA's namespace
		syntheticMeta := kapi.ObjectMeta{
			Name:      hpaNode.HorizontalPodAutoscaler.Spec.ScaleRef.Name,
			Namespace: hpaNode.HorizontalPodAutoscaler.Namespace,
		}

		// Resolve the scale ref's kind+apiVersion to a canonical resource.
		// If the APIVersion doesn't parse, fall back to a group-less resource
		// and let the REST mapper pick the preferred group/version.
		var groupVersionResource unversioned.GroupVersionResource
		resource := strings.ToLower(hpaNode.HorizontalPodAutoscaler.Spec.ScaleRef.Kind)
		if groupVersion, err := unversioned.ParseGroupVersion(hpaNode.HorizontalPodAutoscaler.Spec.ScaleRef.APIVersion); err == nil {
			groupVersionResource = groupVersion.WithResource(resource)
		} else {
			groupVersionResource = unversioned.GroupVersionResource{Resource: resource}
		}

		groupVersionResource, err := registered.RESTMapper().ResourceFor(groupVersionResource)
		if err != nil {
			// unmappable target: no edge can be created for this HPA
			continue
		}

		// create (or reuse) a synthetic node for the resolved target kind
		var syntheticNode graph.Node
		switch groupVersionResource.GroupResource() {
		case kapi.Resource("replicationcontrollers"):
			syntheticNode = kubegraph.FindOrCreateSyntheticReplicationControllerNode(g, &kapi.ReplicationController{ObjectMeta: syntheticMeta})
		case deployapi.Resource("deploymentconfigs"):
			syntheticNode = deploygraph.FindOrCreateSyntheticDeploymentConfigNode(g, &deployapi.DeploymentConfig{ObjectMeta: syntheticMeta})
		default:
			// unsupported scale target kinds are ignored
			continue
		}

		g.AddEdge(hpaNode, syntheticNode, ScalingEdgeKind)
	}
}
// AddAllRequestedServiceAccountEdges adds requested-service-account edges for
// every PodSpec node in the graph.
func AddAllRequestedServiceAccountEdges(g osgraph.Graph) {
	for _, uncast := range g.Nodes() {
		podSpecNode, ok := uncast.(*kubegraph.PodSpecNode)
		if !ok {
			continue
		}
		AddRequestedServiceAccountEdges(g, podSpecNode)
	}
}
// AddAllMountableSecretEdges adds mountable-secret edges for every
// ServiceAccount node in the graph.
func AddAllMountableSecretEdges(g osgraph.Graph) {
	for _, uncast := range g.Nodes() {
		saNode, ok := uncast.(*kubegraph.ServiceAccountNode)
		if !ok {
			continue
		}
		AddMountableSecretEdges(g, saNode)
	}
}
// doesImageStreamExist resolves the image stream referenced by the given istag
// node and reports whether it was found. Stream-tag references are consulted
// first, then stream-image references; only the first successor on either edge
// kind is returned. When neither edge exists, (nil, false) is returned.
func doesImageStreamExist(g osgraph.Graph, istag graph.Node) (graph.Node, bool) {
	for _, imagestream := range g.SuccessorNodesByEdgeKind(istag, imageedges.ReferencedImageStreamGraphEdgeKind) {
		return imagestream, imagestream.(*imagegraph.ImageStreamNode).Found()
	}
	// NOTE(review): this assumes ReferencedImageStreamImageGraphEdgeKind edges
	// also terminate at *imagegraph.ImageStreamNode — confirm, otherwise the
	// type assertion below panics.
	for _, imagestream := range g.SuccessorNodesByEdgeKind(istag, imageedges.ReferencedImageStreamImageGraphEdgeKind) {
		return imagestream, imagestream.(*imagegraph.ImageStreamNode).Found()
	}
	return nil, false
}
func AddMountableSecretEdges(g osgraph.Graph, saNode *kubegraph.ServiceAccountNode) { for _, mountableSecret := range saNode.ServiceAccount.Secrets { syntheticSecret := &kapi.Secret{} syntheticSecret.Namespace = saNode.ServiceAccount.Namespace syntheticSecret.Name = mountableSecret.Name secretNode := kubegraph.FindOrCreateSyntheticSecretNode(g, syntheticSecret) g.AddEdge(saNode, secretNode, MountableSecretEdgeKind) } }