func TestPendingImageStreamTag(t *testing.T) {
	g, _, err := osgraphtest.BuildGraph("../../../api/graph/test/unpushable-build.yaml")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	buildedges.AddAllInputOutputEdges(g)
	buildedges.AddAllBuildEdges(g)
	imageedges.AddAllImageStreamRefEdges(g)
	imageedges.AddAllImageStreamImageRefEdges(g)

	// Drop the build to showcase a TagNotAvailable warning (should happen when no
	// build is new, pending, or running currently)
	nodeFn := osgraph.NodesOfKind(imagegraph.ImageStreamTagNodeKind, buildgraph.BuildConfigNodeKind)
	edgeFn := osgraph.EdgesOfKind(buildedges.BuildInputImageEdgeKind, buildedges.BuildOutputEdgeKind)
	g = g.Subgraph(nodeFn, edgeFn)

	markers := FindPendingTags(g, osgraph.DefaultNamer)
	if e, a := 1, len(markers); e != a {
		t.Fatalf("expected %v, got %v", e, a)
	}

	if got, expected := markers[0].Key, TagNotAvailableWarning; got != expected {
		t.Fatalf("expected marker key %q, got %q", expected, got)
	}
}
// FindCircularBuilds checks all build configs for cycles
func FindCircularBuilds(g osgraph.Graph) []osgraph.Marker {
	// Filter out all but ImageStreamTag and BuildConfig nodes
	nodeFn := osgraph.NodesOfKind(imagegraph.ImageStreamTagNodeKind, buildgraph.BuildConfigNodeKind)
	// Filter out all but BuildInputImage and BuildOutput edges
	edgeFn := osgraph.EdgesOfKind(buildedges.BuildInputImageEdgeKind, buildedges.BuildOutputEdgeKind)

	// Create desired subgraph
	sub := g.Subgraph(nodeFn, edgeFn)

	markers := []osgraph.Marker{}

	// Check for cycles
	for _, cycle := range topo.CyclesIn(sub) {
		nodeNames := []string{}
		for _, node := range cycle {
			if resourceStringer, ok := node.(osgraph.ResourceNode); ok {
				nodeNames = append(nodeNames, resourceStringer.ResourceString())
			}
		}
		markers = append(markers, osgraph.Marker{
			Node:         cycle[0],
			RelatedNodes: cycle,

			Severity: osgraph.WarningSeverity,
			Key:      CyclicBuildConfigWarning,
			Message:  fmt.Sprintf("Cycle detected in build configurations: %s", strings.Join(nodeNames, " -> ")),
		})
	}

	return markers
}
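// The sketch below is illustrative only and not taken from the original source: it shows
// one way FindCircularBuilds could be exercised, following the same shape as
// TestPendingImageStreamTag above. The test name and the fixture path circular-build.yaml
// are assumptions.
func TestCircularBuildsSketch(t *testing.T) {
	g, _, err := osgraphtest.BuildGraph("../../../api/graph/test/circular-build.yaml") // hypothetical fixture
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	buildedges.AddAllInputOutputEdges(g)

	// Every marker produced by this check should carry the cyclic-build warning key.
	for _, marker := range FindCircularBuilds(g) {
		if marker.Key != CyclicBuildConfigWarning {
			t.Errorf("unexpected marker key: %q", marker.Key)
		}
	}
}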
// partitionReverse narrows the graph down to the subgraph of nodes that can reach the given root
func partitionReverse(g osgraph.Graph, root graph.Node, buildInputEdgeKinds []string) osgraph.Graph {
	// Filter out all but BuildConfig and ImageStreamTag nodes
	nodeFn := osgraph.NodesOfKind(buildgraph.BuildConfigNodeKind, imagegraph.ImageStreamTagNodeKind)
	// Filter out all but BuildInputImage and BuildOutput edges
	edgeKinds := []string{}
	edgeKinds = append(edgeKinds, buildInputEdgeKinds...)
	edgeKinds = append(edgeKinds, buildedges.BuildOutputEdgeKind)
	edgeFn := osgraph.EdgesOfKind(edgeKinds...)
	sub := g.Subgraph(nodeFn, edgeFn)

	// Filter out outbound edges from the IST of interest
	edgeFn = osgraph.RemoveOutboundEdges([]graph.Node{root})
	sub = sub.Subgraph(nodeFn, edgeFn)

	// Check all paths leading to the root node, collect any
	// node found in them, and create the desired subgraph
	desired := []graph.Node{root}
	paths := path.DijkstraAllPaths(sub)
	for _, node := range sub.Nodes() {
		if node == root {
			continue
		}
		path, _, _ := paths.Between(node, root)
		if len(path) != 0 {
			desired = append(desired, node)
		}
	}
	return sub.SubgraphWithNodes(desired, osgraph.ExistingDirectEdge)
}
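// Illustrative sketch, not part of the original source: partitionReverse is the natural
// building block for asking "what can feed this tag?". The helper name and the istNode
// parameter below are assumptions.
func reachableBuildSubgraph(g osgraph.Graph, istNode graph.Node) osgraph.Graph {
	// Keep only the BuildConfigs and ImageStreamTags that can reach istNode
	// through build input/output edges.
	return partitionReverse(g, istNode, []string{buildedges.BuildInputImageEdgeKind})
}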
// FindOverlappingHPAs scans the graph in search of HorizontalPodAutoscalers that are attempting to scale the same set of pods.
// This can occur in two ways:
//   - 1. label selectors for two ReplicationControllers/DeploymentConfigs/etc overlap
//   - 2. multiple HorizontalPodAutoscalers are attempting to scale the same ReplicationController/DeploymentConfig/etc
//
// Case 1 is handled by deconflicting the area of influence of ReplicationControllers/DeploymentConfigs/etc, and therefore we
// can assume that it will be handled before this step. Therefore, we are only concerned with finding HPAs that are trying to
// scale the same resources.
//
// The algorithm that is used to implement this check is described as follows:
//   - create a sub-graph containing only HPA nodes and other nodes that can be scaled, as well as any scaling edges or
//     other edges used to connect between objects that can be scaled
//   - for every resulting edge in the new sub-graph, create an edge in the reverse direction
//   - find the shortest paths between all HPA nodes in the graph
//   - shortest paths connecting two horizontal pod autoscalers are used to create markers for the graph
func FindOverlappingHPAs(graph osgraph.Graph, namer osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	nodeFilter := osgraph.NodesOfKind(
		kubenodes.HorizontalPodAutoscalerNodeKind,
		kubenodes.ReplicationControllerNodeKind,
		deploynodes.DeploymentConfigNodeKind,
	)
	edgeFilter := osgraph.EdgesOfKind(
		kubegraph.ScalingEdgeKind,
		deploygraph.DeploymentEdgeKind,
		kubeedges.ManagedByControllerEdgeKind,
	)

	hpaSubGraph := graph.Subgraph(nodeFilter, edgeFilter)
	for _, edge := range hpaSubGraph.Edges() {
		osgraph.AddReversedEdge(hpaSubGraph, edge.From(), edge.To(), sets.NewString())
	}

	hpaNodes := hpaSubGraph.NodesByKind(kubenodes.HorizontalPodAutoscalerNodeKind)

	for _, firstHPA := range hpaNodes {
		// we can use Dijkstra's algorithm as we know we do not have any negative edge weights
		shortestPaths := path.DijkstraFrom(firstHPA, hpaSubGraph)

		for _, secondHPA := range hpaNodes {
			if firstHPA == secondHPA {
				continue
			}

			shortestPath, _ := shortestPaths.To(secondHPA)
			if shortestPath == nil {
				// if two HPAs have no path between them, no error exists
				continue
			}

			markers = append(markers, osgraph.Marker{
				Node:         firstHPA,
				Severity:     osgraph.WarningSeverity,
				RelatedNodes: shortestPath[1:],

				Key: HPAOverlappingScaleRefWarning,
				Message: fmt.Sprintf("%s and %s overlap because they both attempt to scale %s",
					namer.ResourceName(firstHPA), namer.ResourceName(secondHPA), nameList(shortestPath[1:len(shortestPath)-1], namer)),
			})
		}
	}

	return markers
}
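// Minimal consumption sketch, not part of the original source: surface the overlap
// warnings produced by FindOverlappingHPAs. The helper name below is an assumption;
// g and namer stand in for whatever graph and namer the caller already has.
func reportOverlappingHPAs(g osgraph.Graph, namer osgraph.Namer) {
	for _, marker := range FindOverlappingHPAs(g, namer) {
		if marker.Severity == osgraph.WarningSeverity {
			fmt.Println(marker.Message)
		}
	}
}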