Example 1
// findBuildOutput returns the first output ImageTagLocation reachable from the build config node, or nil when the build config has no output edge.
func findBuildOutput(g osgraph.Graph, bcNode *buildgraph.BuildConfigNode) (result ImageTagLocation) {
	for _, output := range g.SuccessorNodesByEdgeKind(bcNode, buildedges.BuildOutputEdgeKind) {
		result = output.(ImageTagLocation)
		return
	}
	return
}
Example 2
// FindUnmountableSecrets inspects all PodSpecs for any Secret reference that isn't listed as mountable by the referenced ServiceAccount
func FindUnmountableSecrets(g osgraph.Graph) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastPodSpecNode := range g.NodesByKind(kubegraph.PodSpecNodeKind) {
		podSpecNode := uncastPodSpecNode.(*kubegraph.PodSpecNode)
		unmountableSecrets := CheckForUnmountableSecrets(g, podSpecNode)

		topLevelNode := osgraph.GetTopLevelContainerNode(g, podSpecNode)
		topLevelString := g.Name(topLevelNode)
		if resourceStringer, ok := topLevelNode.(osgraph.ResourceNode); ok {
			topLevelString = resourceStringer.ResourceString()
		}

		saString := "MISSING_SA"
		saNodes := g.SuccessorNodesByEdgeKind(podSpecNode, kubeedges.ReferencedServiceAccountEdgeKind)
		if len(saNodes) > 0 {
			saString = saNodes[0].(*kubegraph.ServiceAccountNode).ResourceString()
		}

		for _, unmountableSecret := range unmountableSecrets {
			markers = append(markers, osgraph.Marker{
				Node:         podSpecNode,
				RelatedNodes: []graph.Node{unmountableSecret},

				Severity: osgraph.WarningSeverity,
				Key:      UnmountableSecretWarning,
				Message: fmt.Sprintf("%s is attempting to mount a secret %s disallowed by %s",
					topLevelString, unmountableSecret.ResourceString(), saString),
			})
		}
	}

	return markers
}
Example 3
// NewImagePipelineFromImageTagLocation returns the ImagePipeline and all the nodes contributing to it
func NewImagePipelineFromImageTagLocation(g osgraph.Graph, node graph.Node, imageTagLocation ImageTagLocation) (ImagePipeline, IntSet) {
	covered := IntSet{}
	covered.Insert(node.ID())

	flow := ImagePipeline{}
	flow.Image = imageTagLocation

	for _, input := range g.PredecessorNodesByEdgeKind(node, buildedges.BuildOutputEdgeKind) {
		covered.Insert(input.ID())
		build := input.(*buildgraph.BuildConfigNode)
		if flow.Build != nil {
			// report this as an error (unexpected duplicate input build)
		}
		if build.BuildConfig == nil {
			// report this as a missing build / broken link
			break
		}

		base, src, coveredInputs, _ := findBuildInputs(g, build)
		covered.Insert(coveredInputs.List()...)
		flow.BaseImage = base
		flow.Source = src
		flow.Build = build
		flow.LastSuccessfulBuild, flow.LastUnsuccessfulBuild, flow.ActiveBuilds = buildedges.RelevantBuilds(g, flow.Build)
	}

	for _, input := range g.SuccessorNodesByEdgeKind(node, imageedges.ReferencedImageStreamGraphEdgeKind) {
		covered.Insert(input.ID())
		imageStreamNode := input.(*imagegraph.ImageStreamNode)

		flow.DestinationResolved = (len(imageStreamNode.Status.DockerImageRepository) != 0)
	}

	return flow, covered
}
Example 4
// FindUnpushableBuildConfigs checks all build configs that will output to an IST backed by an ImageStream and checks to make sure their builds can push.
func FindUnpushableBuildConfigs(g osgraph.Graph) []osgraph.Marker {
	markers := []osgraph.Marker{}

bc:
	for _, bcNode := range g.NodesByKind(buildgraph.BuildConfigNodeKind) {
		for _, istNode := range g.SuccessorNodesByEdgeKind(bcNode, buildedges.BuildOutputEdgeKind) {
			for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(istNode, imageedges.ReferencedImageStreamGraphEdgeKind) {
				imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode)

				if len(imageStreamNode.Status.DockerImageRepository) == 0 {
					markers = append(markers, osgraph.Marker{
						Node:         bcNode,
						RelatedNodes: []graph.Node{istNode},

						Severity: osgraph.WarningSeverity,
						Key:      MissingRequiredRegistryWarning,
						Message: fmt.Sprintf("%s is pushing to %s that is using %s, but the administrator has not configured the integrated Docker registry.  (oadm registry)",
							bcNode.(*buildgraph.BuildConfigNode).ResourceString(), istNode.(*imagegraph.ImageStreamTagNode).ResourceString(), imageStreamNode.ResourceString()),
					})

					continue bc
				}
			}
		}
	}

	return markers
}
Example 5
// FindHPASpecsMissingScaleRefs finds all Horizontal Pod Autoscalers whose scale reference points to an object that doesn't exist
// or that the client does not have the permission to see.
func FindHPASpecsMissingScaleRefs(graph osgraph.Graph, namer osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastNode := range graph.NodesByKind(kubenodes.HorizontalPodAutoscalerNodeKind) {
		node := uncastNode.(*kubenodes.HorizontalPodAutoscalerNode)

		scaledObjects := graph.SuccessorNodesByEdgeKind(
			uncastNode,
			kubegraph.ScalingEdgeKind,
		)

		if len(scaledObjects) < 1 {
			markers = append(markers, createMissingScaleRefMarker(node, nil, namer))
			continue
		}

		for _, scaleRef := range scaledObjects {
			if existenceChecker, ok := scaleRef.(osgraph.ExistenceChecker); ok && !existenceChecker.Found() {
				// if this node is synthetic, we can't be sure that the HPA is scaling something that actually exists
				markers = append(markers, createMissingScaleRefMarker(node, scaleRef, namer))
			}
		}
	}

	return markers
}
Example 6
// NewImagePipelineFromBuildConfigNode returns the ImagePipeline rooted at the provided build config
// node along with the set of node IDs covered while assembling it.
func NewImagePipelineFromBuildConfigNode(g osgraph.Graph, bcNode *buildgraph.BuildConfigNode) (ImagePipeline, IntSet) {
	covered := IntSet{}
	covered.Insert(bcNode.ID())

	flow := ImagePipeline{}

	base, src, coveredInputs, _ := findBuildInputs(g, bcNode)
	covered.Insert(coveredInputs.List()...)
	flow.BaseImage = base
	flow.Source = src
	flow.Build = bcNode
	flow.LastSuccessfulBuild, flow.LastUnsuccessfulBuild, flow.ActiveBuilds = buildedges.RelevantBuilds(g, flow.Build)

	// we should have at most one
	for _, buildOutputNode := range g.SuccessorNodesByEdgeKind(bcNode, buildedges.BuildOutputEdgeKind) {
		// this will handle the imagestream tag case
		for _, input := range g.SuccessorNodesByEdgeKind(buildOutputNode, imageedges.ReferencedImageStreamGraphEdgeKind) {
			imageStreamNode := input.(*imagegraph.ImageStreamNode)

			flow.DestinationResolved = (len(imageStreamNode.Status.DockerImageRepository) != 0)
		}

		// TODO handle the DockerImage case
	}

	return flow, covered
}
Example 7
// NewReplicationController returns the ReplicationController and a set of all the NodeIDs covered by the ReplicationController
func NewReplicationController(g osgraph.Graph, rcNode *kubegraph.ReplicationControllerNode) (ReplicationController, IntSet) {
	covered := IntSet{}
	covered.Insert(rcNode.ID())

	rcView := ReplicationController{}
	rcView.RC = rcNode
	// the conflicting-pod map is written to below; initialize it so those writes do not hit a nil map
	rcView.ConflictingRCIDToPods = map[int][]*kubegraph.PodNode{}

	for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByRCEdgeKind) {
		podNode := uncastPodNode.(*kubegraph.PodNode)
		covered.Insert(podNode.ID())
		rcView.OwnedPods = append(rcView.OwnedPods, podNode)

		// check to see if this pod is managed by more than one RC
		uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByRCEdgeKind)
		if len(uncastOwningRCs) > 1 {
			for _, uncastOwningRC := range uncastOwningRCs {
				if uncastOwningRC.ID() == rcNode.ID() {
					continue
				}

				conflictingRC := uncastOwningRC.(*kubegraph.ReplicationControllerNode)
				rcView.ConflictingRCs = append(rcView.ConflictingRCs, conflictingRC)

				conflictingPods, ok := rcView.ConflictingRCIDToPods[conflictingRC.ID()]
				if !ok {
					conflictingPods = []*kubegraph.PodNode{}
				}
				conflictingPods = append(conflictingPods, podNode)
				rcView.ConflictingRCIDToPods[conflictingRC.ID()] = conflictingPods
			}
		}
	}

	return rcView, covered
}
Example 8
// doesImageStreamExist returns the ImageStream node referenced by the given image stream tag (or image stream image) node and whether that stream actually exists.
func doesImageStreamExist(g osgraph.Graph, istag graph.Node) (graph.Node, bool) {
	for _, imagestream := range g.SuccessorNodesByEdgeKind(istag, imageedges.ReferencedImageStreamGraphEdgeKind) {
		return imagestream, imagestream.(*imagegraph.ImageStreamNode).Found()
	}
	for _, imagestream := range g.SuccessorNodesByEdgeKind(istag, imageedges.ReferencedImageStreamImageGraphEdgeKind) {
		return imagestream, imagestream.(*imagegraph.ImageStreamNode).Found()
	}
	return nil, false
}
Example 9
// imageStreamTagScheduled reports whether the tag identified by base has scheduled import enabled on the image stream referenced by the input node.
func imageStreamTagScheduled(g osgraph.Graph, input graph.Node, base ImageTagLocation) (scheduled bool) {
	for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(input, imageedges.ReferencedImageStreamGraphEdgeKind) {
		imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode)
		if imageStreamNode.ImageStream != nil {
			if tag, ok := imageStreamNode.ImageStream.Spec.Tags[base.ImageTag()]; ok {
				scheduled = tag.ImportPolicy.Scheduled
				return
			}
		}
	}
	return
}
Example 10
// hasUnresolvedImageStreamTag returns true if any build config outputs to an image stream tag whose backing ImageStream has no resolved Docker image repository.
func hasUnresolvedImageStreamTag(g osgraph.Graph) bool {
	for _, bcNode := range g.NodesByKind(buildgraph.BuildConfigNodeKind) {
		for _, istNode := range g.SuccessorNodesByEdgeKind(bcNode, buildedges.BuildOutputEdgeKind) {
			for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(istNode, imageedges.ReferencedImageStreamGraphEdgeKind) {
				imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode)
				if len(imageStreamNode.Status.DockerImageRepository) == 0 {
					return true
				}
			}
		}
	}

	return false
}
Example 11
// latestBuild returns the latest build for the provided buildConfig.
func latestBuild(g osgraph.Graph, bc graph.Node) *buildgraph.BuildNode {
	builds := []*buildapi.Build{}
	buildNameToNode := map[string]*buildgraph.BuildNode{}
	for _, buildNode := range g.SuccessorNodesByEdgeKind(bc, buildedges.BuildEdgeKind) {
		build := buildNode.(*buildgraph.BuildNode)
		buildNameToNode[build.Build.Name] = build
		builds = append(builds, build.Build)
	}
	if len(builds) == 0 {
		return nil
	}
	sort.Sort(sort.Reverse(buildapi.BuildPtrSliceByCreationTimestamp(builds)))
	return buildNameToNode[builds[0].Name]
}
Example 12
// GetLatestBuild returns the latest build for the provided buildConfig.
func GetLatestBuild(g osgraph.Graph, bc graph.Node) *buildgraph.BuildNode {
	builds := g.SuccessorNodesByEdgeKind(bc, BuildEdgeKind)
	if len(builds) == 0 {
		return nil
	}
	latestBuild := builds[0].(*buildgraph.BuildNode)

	for _, buildNode := range builds[1:] {
		if build, ok := buildNode.(*buildgraph.BuildNode); ok {
			if latestBuild.Build.CreationTimestamp.Before(build.Build.CreationTimestamp) {
				latestBuild = build
			}
		}
	}

	return latestBuild
}
Example 13
// FindMissingInputImageStreams checks all build configs and confirms that their From element exists
//
// Precedence of failures:
// 1. A build config's input points to an image stream that does not exist
// 2. A build config's input uses an image stream tag reference in an existing image stream, but no images within the image stream have that tag assigned
// 3. A build config's input uses an image stream image reference in an existing image stream, but no images within the image stream have the supplied image hexadecimal ID
func FindMissingInputImageStreams(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, bcNode := range g.NodesByKind(buildgraph.BuildConfigNodeKind) {
		for _, bcInputNode := range g.PredecessorNodesByEdgeKind(bcNode, buildedges.BuildInputImageEdgeKind) {
			switch bcInputNode.(type) {
			case *imagegraph.ImageStreamTagNode:

				for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(bcInputNode, imageedges.ReferencedImageStreamGraphEdgeKind) {
					imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode)

					// note, BuildConfig.Spec.BuildSpec.Strategy.[Docker|Source|Custom]Strategy.From Input of ImageStream has been converted to ImageStreamTag on the vX to api conversion
					// prior to our reaching this point in the code; so there is no need to check for that type vs. ImageStreamTag or ImageStreamImage;

					tagNode, _ := bcInputNode.(*imagegraph.ImageStreamTagNode)
					imageStream := imageStreamNode.Object().(*imageapi.ImageStream)
					if _, ok := imageStream.Status.Tags[tagNode.ImageTag()]; !ok {

						markers = append(markers, getImageStreamTagMarker(g, f, bcInputNode, imageStreamNode, tagNode, bcNode))

					}

				}

			case *imagegraph.ImageStreamImageNode:

				for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(bcInputNode, imageedges.ReferencedImageStreamImageGraphEdgeKind) {
					imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode)

					imageNode, _ := bcInputNode.(*imagegraph.ImageStreamImageNode)
					imageStream := imageStreamNode.Object().(*imageapi.ImageStream)
					found, imageID := validImageStreamImage(imageNode, imageStream)
					if !found {

						markers = append(markers, getImageStreamImageMarker(g, f, bcNode, bcInputNode, imageStreamNode, imageNode, imageStream, imageID))

					}

				}

			}

		}
	}
	return markers
}
Example 14
// FindUnpushableBuildConfigs checks all build configs that will output to an IST backed by an ImageStream and checks to make sure their builds can push.
func FindUnpushableBuildConfigs(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	// note, unlike with Inputs, ImageStreamImage is not a valid type for build output

bc:
	for _, bcNode := range g.NodesByKind(buildgraph.BuildConfigNodeKind) {
		for _, istNode := range g.SuccessorNodesByEdgeKind(bcNode, buildedges.BuildOutputEdgeKind) {
			for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(istNode, imageedges.ReferencedImageStreamGraphEdgeKind) {
				imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode)

				if !imageStreamNode.IsFound {
					markers = append(markers, osgraph.Marker{
						Node:         bcNode,
						RelatedNodes: []graph.Node{istNode},

						Severity: osgraph.ErrorSeverity,
						Key:      MissingOutputImageStreamErr,
						Message: fmt.Sprintf("%s is pushing to %s, but the image stream for that tag does not exist.",
							f.ResourceName(bcNode), f.ResourceName(istNode)),
					})

					continue
				}

				if len(imageStreamNode.Status.DockerImageRepository) == 0 {
					markers = append(markers, osgraph.Marker{
						Node:         bcNode,
						RelatedNodes: []graph.Node{istNode},

						Severity: osgraph.ErrorSeverity,
						Key:      MissingRequiredRegistryErr,
						Message: fmt.Sprintf("%s is pushing to %s, but the administrator has not configured the integrated Docker registry.",
							f.ResourceName(bcNode), f.ResourceName(istNode)),
						Suggestion: osgraph.Suggestion("oc adm registry -h"),
					})

					continue bc
				}
			}
		}
	}

	return markers
}
Example 15
// RelevantDeployments returns the active deployment and a list of inactive deployments (in order from newest to oldest)
func RelevantDeployments(g osgraph.Graph, dcNode *deploygraph.DeploymentConfigNode) (*kubegraph.ReplicationControllerNode, []*kubegraph.ReplicationControllerNode) {
	allDeployments := []*kubegraph.ReplicationControllerNode{}
	uncastDeployments := g.SuccessorNodesByEdgeKind(dcNode, DeploymentEdgeKind)
	if len(uncastDeployments) == 0 {
		return nil, []*kubegraph.ReplicationControllerNode{}
	}

	for i := range uncastDeployments {
		allDeployments = append(allDeployments, uncastDeployments[i].(*kubegraph.ReplicationControllerNode))
	}

	sort.Sort(RecentDeploymentReferences(allDeployments))

	if dcNode.DeploymentConfig.Status.LatestVersion == deployutil.DeploymentVersionFor(allDeployments[0]) {
		return allDeployments[0], allDeployments[1:]
	}

	return nil, allDeployments
}
Example 16
// pvcMarker inspects the persistent volume claims referenced by the deployment config and returns a marker for the
// first problem found: a missing claim, or a ReadWriteOnce claim used by a deployment that may run more than one pod
// at a time. It returns nil when no problem is found.
func pvcMarker(g osgraph.Graph, f osgraph.Namer, dcNode *deploygraph.DeploymentConfigNode) *osgraph.Marker {
	for _, uncastPvcNode := range g.SuccessorNodesByEdgeKind(dcNode, deployedges.VolumeClaimEdgeKind) {
		pvcNode := uncastPvcNode.(*kubegraph.PersistentVolumeClaimNode)

		if !pvcNode.Found() {
			return &osgraph.Marker{
				Node:         dcNode,
				RelatedNodes: []graph.Node{uncastPvcNode},

				Severity: osgraph.WarningSeverity,
				Key:      MissingPVCWarning,
				Message:  fmt.Sprintf("%s points to a missing persistent volume claim (%s).", f.ResourceName(dcNode), f.ResourceName(pvcNode)),
				// TODO: Suggestion: osgraph.Suggestion(fmt.Sprintf("oc create pvc ...")),
			}
		}

		dc := dcNode.DeploymentConfig
		rollingParams := dc.Spec.Strategy.RollingParams
		isBlockedBySize := dc.Spec.Replicas > 1
		isBlockedRolling := rollingParams != nil && rollingParams.MaxSurge.IntValue() > 0

		// If the claim is not RWO or deployments will not have more than a pod running at any time
		// then they should be fine.
		if !hasRWOAccess(pvcNode) || (!isBlockedRolling && !isBlockedBySize) {
			continue
		}

		// This shouldn't be an issue on single-host clusters but they are not the common case anyway.
		// If github.com/kubernetes/kubernetes/issues/26567 ever gets fixed upstream, then we can drop
		// this warning.
		return &osgraph.Marker{
			Node:         dcNode,
			RelatedNodes: []graph.Node{uncastPvcNode},

			Severity: osgraph.WarningSeverity,
			Key:      SingleHostVolumeWarning,
			Message:  fmt.Sprintf("%s references a volume which may only be used in a single pod at a time - this may lead to hung deployments", f.ResourceName(dcNode)),
		}
	}
	return nil
}
Example 17
// FindMissingPortMapping checks all routes and reports those that do not specify a port while
// the service they route to exposes multiple ports. It also reports routes whose target service
// does not exist.
func FindMissingPortMapping(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

route:
	for _, uncastRouteNode := range g.NodesByKind(routegraph.RouteNodeKind) {
		for _, uncastServiceNode := range g.SuccessorNodesByEdgeKind(uncastRouteNode, routeedges.ExposedThroughRouteEdgeKind) {
			routeNode := uncastRouteNode.(*routegraph.RouteNode)
			svcNode := uncastServiceNode.(*kubegraph.ServiceNode)

			if !svcNode.Found() {
				markers = append(markers, osgraph.Marker{
					Node:         routeNode,
					RelatedNodes: []graph.Node{svcNode},

					Severity: osgraph.WarningSeverity,
					Key:      MissingServiceWarning,
					Message: fmt.Sprintf("%s is supposed to route traffic to %s but %s doesn't exist.",
						f.ResourceName(routeNode), f.ResourceName(svcNode), f.ResourceName(svcNode)),
				})

				continue route
			}

			if len(svcNode.Spec.Ports) > 1 && (routeNode.Spec.Port == nil || len(routeNode.Spec.Port.TargetPort.String()) == 0) {
				markers = append(markers, osgraph.Marker{
					Node:         routeNode,
					RelatedNodes: []graph.Node{svcNode},

					Severity: osgraph.WarningSeverity,
					Key:      MissingRoutePortWarning,
					Message: fmt.Sprintf("%s doesn't have a port specified and is routing traffic to %s which uses multiple ports.",
						f.ResourceName(routeNode), f.ResourceName(svcNode)),
				})

				continue route
			}
		}
	}

	return markers
}
Example 18
// RelevantBuilds returns the lastSuccessful build, lastUnsuccessful build, and a list of active builds
func RelevantBuilds(g osgraph.Graph, bcNode *buildgraph.BuildConfigNode) (*buildgraph.BuildNode, *buildgraph.BuildNode, []*buildgraph.BuildNode) {
	var (
		lastSuccessfulBuild   *buildgraph.BuildNode
		lastUnsuccessfulBuild *buildgraph.BuildNode
	)
	activeBuilds := []*buildgraph.BuildNode{}
	allBuilds := []*buildgraph.BuildNode{}
	uncastBuilds := g.SuccessorNodesByEdgeKind(bcNode, BuildEdgeKind)

	for i := range uncastBuilds {
		buildNode := uncastBuilds[i].(*buildgraph.BuildNode)
		if belongsToBuildConfig(bcNode.BuildConfig, buildNode.Build) {
			allBuilds = append(allBuilds, buildNode)
		}
	}

	if len(allBuilds) == 0 {
		return nil, nil, []*buildgraph.BuildNode{}
	}

	sort.Sort(RecentBuildReferences(allBuilds))

	for i := range allBuilds {
		switch allBuilds[i].Build.Status.Phase {
		case buildapi.BuildPhaseComplete:
			if lastSuccessfulBuild == nil {
				lastSuccessfulBuild = allBuilds[i]
			}
		case buildapi.BuildPhaseFailed, buildapi.BuildPhaseCancelled, buildapi.BuildPhaseError:
			if lastUnsuccessfulBuild == nil {
				lastUnsuccessfulBuild = allBuilds[i]
			}
		default:
			activeBuilds = append(activeBuilds, allBuilds[i])
		}
	}

	return lastSuccessfulBuild, lastUnsuccessfulBuild, activeBuilds
}
Example 19
// FindDuelingReplicationControllers reports replication controllers whose pods are also claimed by other replication controllers.
func FindDuelingReplicationControllers(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastRCNode := range g.NodesByKind(kubegraph.ReplicationControllerNodeKind) {
		rcNode := uncastRCNode.(*kubegraph.ReplicationControllerNode)

		for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByRCEdgeKind) {
			podNode := uncastPodNode.(*kubegraph.PodNode)

			// check to see if this pod is managed by more than one RC
			uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByRCEdgeKind)
			if len(uncastOwningRCs) > 1 {
				involvedRCNames := []string{}
				relatedNodes := []graph.Node{uncastPodNode}

				for _, uncastOwningRC := range uncastOwningRCs {
					if uncastOwningRC.ID() == rcNode.ID() {
						continue
					}
					owningRC := uncastOwningRC.(*kubegraph.ReplicationControllerNode)
					involvedRCNames = append(involvedRCNames, f.ResourceName(owningRC))

					relatedNodes = append(relatedNodes, uncastOwningRC)
				}

				markers = append(markers, osgraph.Marker{
					Node:         rcNode,
					RelatedNodes: relatedNodes,

					Severity: osgraph.WarningSeverity,
					Key:      DuelingReplicationControllerWarning,
					Message:  fmt.Sprintf("%s is competing for %s with %s", f.ResourceName(rcNode), f.ResourceName(podNode), strings.Join(involvedRCNames, ", ")),
				})
			}
		}
	}

	return markers
}
Example 20
// FindMissingInputImageStreams checks all build configs and confirms that their From element exists
//
// Precedence of failures:
// 1. A build config's input points to an image stream that does not exist
// 2. A build config's input uses an image stream tag reference in an existing image stream, but no images within the image stream have that tag assigned
// 3. A build config's input uses an image stream image reference in an existing image stream, but no images within the image stream have the supplied image hexadecimal ID
func FindMissingInputImageStreams(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, bcNode := range g.NodesByKind(buildgraph.BuildConfigNodeKind) {
		for _, bcInputNode := range g.PredecessorNodesByEdgeKind(bcNode, buildedges.BuildInputImageEdgeKind) {
			switch bcInputNode.(type) {
			case *imagegraph.ImageStreamTagNode:

				for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(bcInputNode, imageedges.ReferencedImageStreamGraphEdgeKind) {
					imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode)

					// note, BuildConfig.Spec.BuildSpec.Strategy.[Docker|Source|Custom]Strategy.From Input of ImageStream has been converted to ImageStreamTag on the vX to api conversion
					// prior to our reaching this point in the code; so there is no need to check for that type vs. ImageStreamTag or ImageStreamImage;

					tagNode, _ := bcInputNode.(*imagegraph.ImageStreamTagNode)
					imageStream := imageStreamNode.Object().(*imageapi.ImageStream)
					if _, ok := imageStream.Status.Tags[tagNode.ImageTag()]; !ok {

						markers = append(markers, osgraph.Marker{
							Node: bcNode,
							RelatedNodes: []graph.Node{bcInputNode,
								imageStreamNode},
							Severity:   osgraph.WarningSeverity,
							Key:        MissingImageStreamTagWarning,
							Message:    fmt.Sprintf("%s builds from %s, but the image stream tag does not exist.", f.ResourceName(bcNode), f.ResourceName(bcInputNode)),
							Suggestion: osgraph.Suggestion(fmt.Sprintf("examine analysis of build config outputs from this command and see if they build %s", f.ResourceName(bcInputNode))),
						})

					}

				}

			case *imagegraph.ImageStreamImageNode:

				for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(bcInputNode, imageedges.ReferencedImageStreamImageGraphEdgeKind) {
					imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode)

					imageNode, _ := bcInputNode.(*imagegraph.ImageStreamImageNode)
					imageStream := imageStreamNode.Object().(*imageapi.ImageStream)
					found, imageID, suggestion := validImageStreamImage(imageNode, imageStream)
					if !found {

						markers = append(markers, osgraph.Marker{
							Node: bcNode,
							RelatedNodes: []graph.Node{bcInputNode,
								imageStreamNode},
							Severity:   osgraph.WarningSeverity,
							Key:        MissingImageStreamImageWarning,
							Message:    fmt.Sprintf("%s builds from %s, but the image stream image does not exist.", f.ResourceName(bcNode), f.ResourceName(bcInputNode)),
							Suggestion: osgraph.Suggestion(fmt.Sprintf(suggestion, imageID, f.ResourceName(imageStreamNode))),
						})

					}

				}

			}

		}
	}
	return markers
}
Example 21
// routePortMarker returns a marker when the route's target service is missing, when the route omits a port while the
// service exposes multiple ports, or when the route's port does not match any of the service's target ports by number
// or by name. It returns nil when the route's port mapping is valid.
func routePortMarker(g osgraph.Graph, f osgraph.Namer, routeNode *routegraph.RouteNode) *osgraph.Marker {
	for _, uncastServiceNode := range g.SuccessorNodesByEdgeKind(routeNode, routeedges.ExposedThroughRouteEdgeKind) {
		svcNode := uncastServiceNode.(*kubegraph.ServiceNode)

		if !svcNode.Found() {
			return &osgraph.Marker{
				Node:         routeNode,
				RelatedNodes: []graph.Node{svcNode},

				Severity: osgraph.WarningSeverity,
				Key:      MissingServiceWarning,
				Message: fmt.Sprintf("%s is supposed to route traffic to %s but %s doesn't exist.",
					f.ResourceName(routeNode), f.ResourceName(svcNode), f.ResourceName(svcNode)),
				// TODO: Suggest 'oc create service' once that's a thing.
				// See https://github.com/kubernetes/kubernetes/pull/19509
			}
		}

		if len(svcNode.Spec.Ports) > 1 && (routeNode.Spec.Port == nil || len(routeNode.Spec.Port.TargetPort.String()) == 0) {
			return &osgraph.Marker{
				Node:         routeNode,
				RelatedNodes: []graph.Node{svcNode},

				Severity: osgraph.WarningSeverity,
				Key:      MissingRoutePortWarning,
				Message: fmt.Sprintf("%s doesn't have a port specified and is routing traffic to %s which uses multiple ports.",
					f.ResourceName(routeNode), f.ResourceName(svcNode)),
			}
		}

		if routeNode.Spec.Port == nil {
			// If no port is specified, we don't need to analyze any further.
			return nil
		}

		routePortString := routeNode.Spec.Port.TargetPort.String()
		if routePort, err := strconv.Atoi(routePortString); err == nil {
			for _, port := range svcNode.Spec.Ports {
				if port.TargetPort.IntValue() == routePort {
					return nil
				}
			}

			// route has a numeric port, service has no port with that number as a targetPort.
			marker := &osgraph.Marker{
				Node:         routeNode,
				RelatedNodes: []graph.Node{svcNode},

				Severity: osgraph.WarningSeverity,
				Key:      WrongRoutePortWarning,
				Message: fmt.Sprintf("%s has a port specified (%d) but %s has no such targetPort.",
					f.ResourceName(routeNode), routePort, f.ResourceName(svcNode)),
			}
			if len(svcNode.Spec.Ports) == 1 {
				marker.Suggestion = osgraph.Suggestion(fmt.Sprintf("oc patch %s -p '{\"spec\":{\"port\":{\"targetPort\": %d}}}'", f.ResourceName(routeNode), svcNode.Spec.Ports[0].TargetPort.IntValue()))
			}

			return marker
		}

		for _, port := range svcNode.Spec.Ports {
			if port.Name == routePortString {
				return nil
			}
		}

		// route has a named port, service has no port with that name.
		marker := &osgraph.Marker{
			Node:         routeNode,
			RelatedNodes: []graph.Node{svcNode},

			Severity: osgraph.WarningSeverity,
			Key:      WrongRoutePortWarning,
			Message: fmt.Sprintf("%s has a named port specified (%q) but %s has no such named port.",
				f.ResourceName(routeNode), routePortString, f.ResourceName(svcNode)),
		}
		if len(svcNode.Spec.Ports) == 1 {
			marker.Suggestion = osgraph.Suggestion(fmt.Sprintf("oc patch %s -p '{\"spec\":{\"port\":{\"targetPort\": %d}}}'", f.ResourceName(routeNode), svcNode.Spec.Ports[0].TargetPort.IntValue()))
		}

		return marker
	}
	return nil
}