func tagReferenceToTagEvent(stream *api.ImageStream, tagRef api.TagReference, tagOrID string) (*api.TagEvent, error) {
	switch tagRef.From.Kind {
	case "DockerImage":
		return &api.TagEvent{
			Created:              util.Now(),
			DockerImageReference: tagRef.From.Name,
		}, nil

	case "ImageStreamImage":
		ref, err := api.DockerImageReferenceForStream(stream)
		if err != nil {
			return nil, err
		}

		resolvedIDs := api.ResolveImageID(stream, tagOrID)
		switch len(resolvedIDs) {
		case 1:
			ref.ID = resolvedIDs.List()[0]
			return &api.TagEvent{
				Created:              util.Now(),
				DockerImageReference: ref.String(),
				Image:                ref.ID,
			}, nil
		case 0:
			return nil, fmt.Errorf("no images match the prefix %q", tagOrID)
		default:
			return nil, fmt.Errorf("multiple images match the prefix %q: %s", tagOrID, strings.Join(resolvedIDs.List(), ", "))
		}

	case "ImageStreamTag":
		return api.LatestTaggedImage(stream, tagOrID), nil

	default:
		// DockerImage is handled above, so include it in the error message.
		return nil, fmt.Errorf("invalid from.kind %q: it must be DockerImage, ImageStreamImage or ImageStreamTag", tagRef.From.Kind)
	}
}
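// resolveImageIDSketch illustrates the prefix matching that the
// api.ResolveImageID call above relies on: collect the IDs of every image in
// the stream's tag history whose name starts with the given prefix. This is a
// hedged sketch for context only, not the api package's actual implementation;
// the util.StringSet helper and the Status.Tags layout are assumed from the
// surrounding snippets.
func resolveImageIDSketch(stream *api.ImageStream, imageID string) util.StringSet {
	set := util.NewStringSet()
	for _, history := range stream.Status.Tags {
		for _, tagging := range history.Items {
			if strings.HasPrefix(tagging.Image, imageID) {
				set.Insert(tagging.Image)
			}
		}
	}
	return set
}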
// checkNodeReady checks raw node ready condition, without transition timestamp set.
func (s *NodeController) checkNodeReady(node *api.Node) *api.NodeCondition {
	switch status, err := s.kubeletClient.HealthCheck(node.Name); {
	case err != nil:
		glog.V(2).Infof("NodeController: node %s health check error: %v", node.Name, err)
		return &api.NodeCondition{
			Type:          api.NodeReady,
			Status:        api.ConditionUnknown,
			Reason:        fmt.Sprintf("Node health check error: %v", err),
			LastProbeTime: util.Now(),
		}
	case status == probe.Failure:
		return &api.NodeCondition{
			Type:          api.NodeReady,
			Status:        api.ConditionNone,
			Reason:        "Node health check failed: kubelet /healthz endpoint returns not ok",
			LastProbeTime: util.Now(),
		}
	default:
		return &api.NodeCondition{
			Type:          api.NodeReady,
			Status:        api.ConditionFull,
			Reason:        "Node health check succeeded: kubelet /healthz endpoint returns ok",
			LastProbeTime: util.Now(),
		}
	}
}
// HandlePod updates the state of the build based on the pod state
func (bc *BuildPodController) HandlePod(pod *kapi.Pod) error {
	obj, exists, err := bc.BuildStore.Get(buildKey(pod))
	if err != nil {
		glog.V(4).Infof("Error getting Build for pod %s/%s: %v", pod.Namespace, pod.Name, err)
		return err
	}
	if !exists || obj == nil {
		glog.V(5).Infof("No Build found for pod %s/%s", pod.Namespace, pod.Name)
		return nil
	}

	build := obj.(*buildapi.Build)

	// A cancelling event was triggered for the build, delete its pod and update build status.
	if build.Status.Cancelled {
		glog.V(4).Infof("Cancelling Build %s/%s.", build.Namespace, build.Name)
		if err := bc.CancelBuild(build, pod); err != nil {
			return fmt.Errorf("failed to cancel Build %s/%s: %v, will retry", build.Namespace, build.Name, err)
		}
		return nil
	}

	nextStatus := build.Status.Phase
	switch pod.Status.Phase {
	case kapi.PodRunning:
		// The pod's still running
		nextStatus = buildapi.BuildPhaseRunning
	case kapi.PodSucceeded, kapi.PodFailed:
		// Check the exit codes of all the containers in the pod
		nextStatus = buildapi.BuildPhaseComplete
		for _, info := range pod.Status.ContainerStatuses {
			if info.State.Terminated != nil && info.State.Terminated.ExitCode != 0 {
				nextStatus = buildapi.BuildPhaseFailed
				break
			}
		}
	}

	if build.Status.Phase != nextStatus {
		previousPhase := build.Status.Phase
		glog.V(4).Infof("Updating Build %s/%s status %s -> %s", build.Namespace, build.Name, previousPhase, nextStatus)
		build.Status.Phase = nextStatus
		if buildutil.IsBuildComplete(build) {
			now := util.Now()
			build.Status.CompletionTimestamp = &now
		}
		if build.Status.Phase == buildapi.BuildPhaseRunning {
			now := util.Now()
			build.Status.StartTimestamp = &now
		}
		if err := bc.BuildUpdater.Update(build.Namespace, build); err != nil {
			return fmt.Errorf("failed to update Build %s/%s: %v", build.Namespace, build.Name, err)
		}
		// Log the phase captured before the update; build.Status.Phase already
		// holds nextStatus at this point.
		glog.V(4).Infof("Build %s/%s status was updated %s -> %s", build.Namespace, build.Name, previousPhase, nextStatus)
	}
	return nil
}
// HandlePod updates the state of the build based on the pod state
func (bc *BuildPodController) HandlePod(pod *kapi.Pod) error {
	obj, exists, err := bc.BuildStore.Get(buildKey(pod))
	if err != nil {
		glog.V(4).Infof("Error getting Build for pod %s/%s: %v", pod.Namespace, pod.Name, err)
		return err
	}
	if !exists || obj == nil {
		glog.V(5).Infof("No Build found for pod %s/%s", pod.Namespace, pod.Name)
		return nil
	}

	build := obj.(*buildapi.Build)

	nextStatus := build.Status.Phase
	switch pod.Status.Phase {
	case kapi.PodRunning:
		// The pod's still running
		nextStatus = buildapi.BuildPhaseRunning
	case kapi.PodSucceeded:
		// Check the exit codes of all the containers in the pod
		nextStatus = buildapi.BuildPhaseComplete
		if len(pod.Status.ContainerStatuses) == 0 {
			// no containers in the pod means something went badly wrong, so the build
			// should be failed.
			glog.V(2).Infof("Failing build %s/%s because the pod has no containers", build.Namespace, build.Name)
			nextStatus = buildapi.BuildPhaseFailed
		} else {
			for _, info := range pod.Status.ContainerStatuses {
				if info.State.Terminated != nil && info.State.Terminated.ExitCode != 0 {
					nextStatus = buildapi.BuildPhaseFailed
					break
				}
			}
		}
	case kapi.PodFailed:
		nextStatus = buildapi.BuildPhaseFailed
	}

	if build.Status.Phase != nextStatus {
		previousPhase := build.Status.Phase
		glog.V(4).Infof("Updating Build %s/%s status %s -> %s", build.Namespace, build.Name, previousPhase, nextStatus)
		build.Status.Phase = nextStatus
		if buildutil.IsBuildComplete(build) {
			now := util.Now()
			build.Status.CompletionTimestamp = &now
		}
		if build.Status.Phase == buildapi.BuildPhaseRunning {
			now := util.Now()
			build.Status.StartTimestamp = &now
		}
		if err := bc.BuildUpdater.Update(build.Namespace, build); err != nil {
			return fmt.Errorf("failed to update Build %s/%s: %v", build.Namespace, build.Name, err)
		}
		// Log the phase captured before the update; build.Status.Phase already
		// holds nextStatus at this point.
		glog.V(4).Infof("Build %s/%s status was updated %s -> %s", build.Namespace, build.Name, previousPhase, nextStatus)
	}
	return nil
}
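// buildPhaseForPod distills the pod-to-build phase mapping used by the second
// HandlePod variant above into a pure function, for illustration only; it is
// not part of the controller. A running pod keeps the build running, a failed
// pod fails the build, and a succeeded pod completes the build unless it
// reports no containers or any container exited non-zero.
func buildPhaseForPod(pod *kapi.Pod, current buildapi.BuildPhase) buildapi.BuildPhase {
	switch pod.Status.Phase {
	case kapi.PodRunning:
		return buildapi.BuildPhaseRunning
	case kapi.PodFailed:
		return buildapi.BuildPhaseFailed
	case kapi.PodSucceeded:
		if len(pod.Status.ContainerStatuses) == 0 {
			return buildapi.BuildPhaseFailed
		}
		for _, info := range pod.Status.ContainerStatuses {
			if info.State.Terminated != nil && info.State.Terminated.ExitCode != 0 {
				return buildapi.BuildPhaseFailed
			}
		}
		return buildapi.BuildPhaseComplete
	}
	// Other pod phases leave the build phase unchanged.
	return current
}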
func NewEmptyPolicy(namespace string) *authorizationapi.Policy {
	policy := &authorizationapi.Policy{}
	policy.Name = authorizationapi.PolicyName
	policy.Namespace = namespace
	policy.CreationTimestamp = util.Now()
	policy.LastModified = util.Now()
	policy.Roles = make(map[string]*authorizationapi.Role)

	return policy
}
func NewEmptyPolicyBinding(namespace, policyNamespace, policyBindingName string) *authorizationapi.PolicyBinding {
	binding := &authorizationapi.PolicyBinding{}
	binding.Name = policyBindingName
	binding.Namespace = namespace
	binding.CreationTimestamp = util.Now()
	binding.LastModified = util.Now()
	binding.PolicyRef = kapi.ObjectReference{Name: authorizationapi.PolicyName, Namespace: policyNamespace}
	binding.RoleBindings = make(map[string]*authorizationapi.RoleBinding)

	return binding
}
func TestGetEventExisting(t *testing.T) {
	// Arrange
	eventTime := util.Now()
	event := api.Event{
		Reason:  "do I exist",
		Message: "I do, oh my",
		InvolvedObject: api.ObjectReference{
			Kind:       "Pod",
			Name:       "clever.name.here",
			Namespace:  "spaceOfName",
			UID:        "D933D32AFB2A238",
			APIVersion: "version",
		},
		Source: api.EventSource{
			Component: "kubelet",
			Host:      "kublet.node4",
		},
		Count:          1,
		FirstTimestamp: eventTime,
		LastTimestamp:  eventTime,
	}
	addOrUpdateEvent(&event)

	// Act
	existingEvent := getEvent(&event)

	// Assert
	compareEventWithHistoryEntry(&event, &existingEvent, t)
}
func TestBuildDecorator(t *testing.T) {
	build := &buildapi.Build{
		ObjectMeta: kapi.ObjectMeta{Name: "buildid", Namespace: "default"},
		Parameters: buildapi.BuildParameters{
			Source: buildapi.BuildSource{
				Type: buildapi.BuildSourceGit,
				Git: &buildapi.GitBuildSource{
					URI: "http://github.com/my/repository",
				},
				ContextDir: "context",
			},
			Strategy: buildapi.BuildStrategy{
				Type:           buildapi.DockerBuildStrategyType,
				DockerStrategy: &buildapi.DockerBuildStrategy{},
			},
			Output: buildapi.BuildOutput{
				DockerImageReference: "repository/data",
			},
		},
		Status: buildapi.BuildStatusNew,
	}
	now := util.Now()
	startTime := util.NewTime(now.Time.Add(-1 * time.Minute))
	build.StartTimestamp = &startTime
	err := Decorator(build)
	if err != nil {
		t.Errorf("Unexpected error decorating build: %v", err)
	}
	if build.Duration <= 0 {
		t.Errorf("Build duration should be greater than zero")
	}
}
func TestNamespaceStatusStrategy(t *testing.T) {
	ctx := api.NewDefaultContext()
	if StatusStrategy.NamespaceScoped() {
		t.Errorf("Namespaces should not be namespace scoped")
	}
	if StatusStrategy.AllowCreateOnUpdate() {
		t.Errorf("Namespaces should not allow create on update")
	}
	now := util.Now()
	oldNamespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "10"},
		Spec:       api.NamespaceSpec{Finalizers: []api.FinalizerName{"kubernetes"}},
		Status:     api.NamespaceStatus{Phase: api.NamespaceActive},
	}
	namespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "9", DeletionTimestamp: &now},
		Status:     api.NamespaceStatus{Phase: api.NamespaceTerminating},
	}
	StatusStrategy.PrepareForUpdate(namespace, oldNamespace)
	if namespace.Status.Phase != api.NamespaceTerminating {
		t.Errorf("Namespace status updates should allow change of phase: %v", namespace.Status.Phase)
	}
	if len(namespace.Spec.Finalizers) != 1 || namespace.Spec.Finalizers[0] != api.FinalizerKubernetes {
		t.Errorf("PrepareForUpdate should have preserved old finalizers")
	}
	errs := StatusStrategy.ValidateUpdate(ctx, namespace, oldNamespace)
	if len(errs) != 0 {
		t.Errorf("Unexpected error %v", errs)
	}
	if namespace.ResourceVersion != "9" {
		t.Errorf("Incoming resource version on update should not be mutated")
	}
}
func TestEventCreate(t *testing.T) {
	objReference := &api.ObjectReference{
		Kind:            "foo",
		Namespace:       "nm",
		Name:            "objref1",
		UID:             "uid",
		APIVersion:      "apiv1",
		ResourceVersion: "1",
	}
	timeStamp := util.Now()
	event := &api.Event{
		//namespace: namespace{"default"},
		Condition:      "Running",
		InvolvedObject: *objReference,
		Timestamp:      timeStamp,
	}
	c := &testClient{
		Request: testRequest{
			Method: "POST",
			Path:   "/events",
			Body:   event,
		},
		Response: Response{StatusCode: 200, Body: event},
	}

	response, err := c.Setup().Events("").Create(event)
	if err != nil {
		t.Errorf("%#v should be nil.", err)
	}

	if e, a := *objReference, response.InvolvedObject; !reflect.DeepEqual(e, a) {
		t.Errorf("%#v != %#v.", e, a)
	}
}
func TestDeleteNamespaceWithIncompleteFinalizers(t *testing.T) {
	now := util.Now()
	fakeEtcdClient, helper := newHelper(t)
	fakeEtcdClient.ChangeIndex = 1
	fakeEtcdClient.Data["/registry/namespaces/foo"] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Value: runtime.EncodeOrDie(latest.Codec, &api.Namespace{
					ObjectMeta: api.ObjectMeta{
						Name:              "foo",
						DeletionTimestamp: &now,
					},
					Spec: api.NamespaceSpec{
						Finalizers: []api.FinalizerName{api.FinalizerKubernetes},
					},
					Status: api.NamespaceStatus{Phase: api.NamespaceActive},
				}),
				ModifiedIndex: 1,
				CreatedIndex:  1,
			},
		},
	}
	storage, _, _ := NewStorage(helper)
	_, err := storage.Delete(api.NewDefaultContext(), "foo", nil)
	if err == nil {
		t.Fatalf("expected error, got none")
	}
}
// NewFilterBeforePredicate returns a predicate that is true if the build was
// created before the current time minus the specified duration
func NewFilterBeforePredicate(d time.Duration) FilterPredicate {
	now := util.Now()
	before := util.NewTime(now.Time.Add(-1 * d))
	return func(build *buildapi.Build) bool {
		return build.CreationTimestamp.Before(before)
	}
}
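// selectOldBuilds is a usage sketch for NewFilterBeforePredicate above — a
// hypothetical helper, not part of the original package — showing how the
// returned predicate selects builds created more than d ago.
func selectOldBuilds(builds []*buildapi.Build, d time.Duration) []*buildapi.Build {
	isOld := NewFilterBeforePredicate(d)
	var old []*buildapi.Build
	for _, build := range builds {
		if isOld(build) {
			old = append(old, build)
		}
	}
	return old
}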
func (rs *REST) Create(ctx api.Context, obj runtime.Object) (<-chan apiserver.RESTResult, error) {
	minion, ok := obj.(*api.Minion)
	if !ok {
		return nil, fmt.Errorf("not a minion: %#v", obj)
	}
	if minion.Name == "" {
		return nil, fmt.Errorf("ID should not be empty: %#v", minion)
	}

	minion.CreationTimestamp = util.Now()

	return apiserver.MakeAsync(func() (runtime.Object, error) {
		err := rs.registry.CreateMinion(ctx, minion)
		if err != nil {
			return nil, err
		}
		minionName := minion.Name
		// Note: this shadows the outer minion; the lookup result is returned below.
		minion, err := rs.registry.GetMinion(ctx, minionName)
		if err == ErrNotHealty {
			return rs.toApiMinion(minionName), nil
		}
		if minion == nil {
			return nil, ErrDoesNotExist
		}
		if err != nil {
			return nil, err
		}
		return minion, nil
	}), nil
}
// TestSort verifies that builds are sorted by most recently created
func TestSort(t *testing.T) {
	present := util.Now()
	past := util.NewTime(present.Time.Add(-1 * time.Minute))
	builds := []*buildapi.Build{
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "past",
				CreationTimestamp: past,
			},
		},
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "present",
				CreationTimestamp: present,
			},
		},
	}
	sort.Sort(sortableBuilds(builds))
	if builds[0].Name != "present" {
		t.Errorf("Unexpected sort order")
	}
	if builds[1].Name != "past" {
		t.Errorf("Unexpected sort order")
	}
}
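// sortableBuilds is not shown in this section; a definition consistent with
// the newest-first order TestSort expects would look like the following.
// This is an assumed reconstruction, not the original source.
type sortableBuilds []*buildapi.Build

func (s sortableBuilds) Len() int      { return len(s) }
func (s sortableBuilds) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// Less orders newest first: build i sorts before build j when j was created
// earlier than i.
func (s sortableBuilds) Less(i, j int) bool {
	return s[j].CreationTimestamp.Before(s[i].CreationTimestamp)
}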
func (rs *REST) Create(obj runtime.Object) (<-chan runtime.Object, error) {
	minion, ok := obj.(*api.Minion)
	if !ok {
		return nil, fmt.Errorf("not a minion: %#v", obj)
	}
	if minion.ID == "" {
		return nil, fmt.Errorf("ID should not be empty: %#v", minion)
	}

	minion.CreationTimestamp = util.Now()

	return apiserver.MakeAsync(func() (runtime.Object, error) {
		err := rs.registry.Insert(minion.ID)
		if err != nil {
			return nil, err
		}
		contains, err := rs.registry.Contains(minion.ID)
		if err != nil {
			return nil, err
		}
		if contains {
			return rs.toApiMinion(minion.ID), nil
		}
		return nil, fmt.Errorf("unable to add minion %#v", minion)
	}), nil
}
func TestAddOrUpdateEventNoExisting(t *testing.T) {
	// Arrange
	eventTime := util.Now()
	event := api.Event{
		Reason:  "my reasons are many",
		Message: "my message is love",
		InvolvedObject: api.ObjectReference{
			Kind:       "Pod",
			Name:       "awesome.name",
			Namespace:  "betterNamespace",
			UID:        "C934D34AFB20242",
			APIVersion: "version",
		},
		Source: api.EventSource{
			Component: "kubelet",
			Host:      "kublet.node1",
		},
		Count:          1,
		FirstTimestamp: eventTime,
		LastTimestamp:  eventTime,
	}

	// Act
	result := addOrUpdateEvent(&event)

	// Assert
	compareEventWithHistoryEntry(&event, &result, t)
}
func TestEventGet(t *testing.T) {
	objReference := &api.ObjectReference{
		Kind:            "foo",
		Namespace:       "nm",
		Name:            "objref1",
		UID:             "uid",
		APIVersion:      "apiv1",
		ResourceVersion: "1",
	}
	timeStamp := util.Now()
	event := &api.Event{
		InvolvedObject: *objReference,
		Timestamp:      timeStamp,
	}
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   "/events/1",
			Body:   nil,
		},
		Response: Response{StatusCode: 200, Body: event},
	}

	response, err := c.Setup().Events("").Get("1")
	if err != nil {
		t.Errorf("%#v should be nil.", err)
	}

	if e, r := event.InvolvedObject, response.InvolvedObject; !reflect.DeepEqual(e, r) {
		t.Errorf("%#v != %#v.", e, r)
	}
}
// TestSort verifies that replication controllers are sorted by most recently created
func TestSort(t *testing.T) {
	present := util.Now()
	past := util.NewTime(present.Time.Add(-1 * time.Minute))
	controllers := []*kapi.ReplicationController{
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "past",
				CreationTimestamp: past,
			},
		},
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "present",
				CreationTimestamp: present,
			},
		},
	}
	sort.Sort(sortableReplicationControllers(controllers))
	if controllers[0].Name != "present" {
		t.Errorf("Unexpected sort order")
	}
	if controllers[1].Name != "past" {
		t.Errorf("Unexpected sort order")
	}
}
// NewFilterBeforePredicate returns a predicate that is true if the replication
// controller was created before the current time minus the specified duration
func NewFilterBeforePredicate(d time.Duration) FilterPredicate {
	now := util.Now()
	before := util.NewTime(now.Time.Add(-1 * d))
	return func(item *kapi.ReplicationController) bool {
		return item.CreationTimestamp.Before(before)
	}
}
func (s *statusManager) SetPodStatus(pod *api.Pod, status api.PodStatus) {
	podFullName := kubecontainer.GetPodFullName(pod)
	s.podStatusesLock.Lock()
	defer s.podStatusesLock.Unlock()
	oldStatus, found := s.podStatuses[podFullName]

	// ensure that the start time does not change across updates.
	if found && oldStatus.StartTime != nil {
		status.StartTime = oldStatus.StartTime
	}

	// if the status has no start time, we need to set an initial time
	// TODO(yujuhong): Consider setting StartTime when generating the pod
	// status instead, which would allow statusManager to become a simple cache
	// again.
	if status.StartTime.IsZero() {
		if pod.Status.StartTime.IsZero() {
			// the pod did not have a previously recorded value so set to now
			now := util.Now()
			status.StartTime = &now
		} else {
			// the pod had a recorded value, but the kubelet restarted so we need to rebuild cache
			// based on last observed value
			status.StartTime = pod.Status.StartTime
		}
	}

	if !found || !reflect.DeepEqual(oldStatus, status) {
		s.podStatuses[podFullName] = status
		s.podStatusChannel <- podStatusSyncRequest{pod, status}
	} else {
		glog.V(3).Infof("Ignoring same pod status for %s - old: %s new: %s", podFullName, oldStatus, status)
	}
}
// Create registers the given ReplicationController.
func (rs *REST) Create(ctx api.Context, obj runtime.Object) (<-chan apiserver.RESTResult, error) {
	controller, ok := obj.(*api.ReplicationController)
	if !ok {
		return nil, fmt.Errorf("not a replication controller: %#v", obj)
	}
	if !api.ValidNamespace(ctx, &controller.ObjectMeta) {
		return nil, errors.NewConflict("controller", controller.Namespace, fmt.Errorf("Controller.Namespace does not match the provided context"))
	}

	if len(controller.Name) == 0 {
		controller.Name = util.NewUUID().String()
	}
	// Pod Manifest ID should be assigned by the pod API
	controller.DesiredState.PodTemplate.DesiredState.Manifest.ID = ""
	if errs := validation.ValidateReplicationController(controller); len(errs) > 0 {
		return nil, errors.NewInvalid("replicationController", controller.Name, errs)
	}

	controller.CreationTimestamp = util.Now()

	return apiserver.MakeAsync(func() (runtime.Object, error) {
		err := rs.registry.CreateController(ctx, controller)
		if err != nil {
			return nil, err
		}
		return rs.registry.GetController(ctx, controller.Name)
	}), nil
}
func TestDeleteNamespaceWithCompleteFinalizers(t *testing.T) {
	now := util.Now()
	fakeEtcdClient, etcdStorage := newEtcdStorage(t)
	fakeEtcdClient.ChangeIndex = 1
	key := etcdtest.AddPrefix("/namespaces/foo")
	fakeEtcdClient.Data[key] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Value: runtime.EncodeOrDie(latest.Codec, &api.Namespace{
					ObjectMeta: api.ObjectMeta{
						Name:              "foo",
						DeletionTimestamp: &now,
					},
					Spec: api.NamespaceSpec{
						Finalizers: []api.FinalizerName{},
					},
					Status: api.NamespaceStatus{Phase: api.NamespaceActive},
				}),
				ModifiedIndex: 1,
				CreatedIndex:  1,
			},
		},
	}
	storage, _, _ := NewStorage(etcdStorage)
	_, err := storage.Delete(api.NewContext(), "foo", nil)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
// CancelBuild updates a build status to Cancelled, after its associated pod is deleted.
func (bc *BuildController) CancelBuild(build *buildapi.Build) error {
	if !isBuildCancellable(build) {
		glog.V(4).Infof("Build %s/%s can be cancelled only if it has pending/running status, not %s.", build.Namespace, build.Name, build.Status.Phase)
		return nil
	}

	glog.V(4).Infof("Cancelling Build %s/%s.", build.Namespace, build.Name)

	pod, err := bc.PodManager.GetPod(build.Namespace, buildutil.GetBuildPodName(build))
	if err != nil {
		if !errors.IsNotFound(err) {
			return fmt.Errorf("Failed to get Pod for build %s/%s: %v", build.Namespace, build.Name, err)
		}
	} else {
		err := bc.PodManager.DeletePod(build.Namespace, pod)
		if err != nil && !errors.IsNotFound(err) {
			return fmt.Errorf("Couldn't delete Build Pod %s/%s: %v", build.Namespace, pod.Name, err)
		}
	}

	build.Status.Phase = buildapi.BuildPhaseCancelled
	now := util.Now()
	build.Status.CompletionTimestamp = &now
	if err := bc.BuildUpdater.Update(build.Namespace, build); err != nil {
		return fmt.Errorf("Failed to update Build %s/%s: %v", build.Namespace, build.Name, err)
	}

	glog.V(4).Infof("Build %s/%s was successfully cancelled.", build.Namespace, build.Name)
	return nil
}
func (m *VirtualStorage) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	if err := rest.BeforeCreate(m.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}

	role := obj.(*authorizationapi.Role)

	policy, err := m.EnsurePolicy(ctx)
	if err != nil {
		return nil, err
	}
	if _, exists := policy.Roles[role.Name]; exists {
		return nil, kapierrors.NewAlreadyExists("Role", role.Name)
	}

	role.ResourceVersion = policy.ResourceVersion
	policy.Roles[role.Name] = role
	policy.LastModified = util.Now()

	if err := m.PolicyStorage.UpdatePolicy(ctx, policy); err != nil {
		return nil, err
	}
	return role, nil
}
func TestFilterBeforePredicate(t *testing.T) {
	youngerThan := time.Hour
	now := util.Now()
	old := util.NewTime(now.Time.Add(-1 * youngerThan))
	builds := []*buildapi.Build{
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "old",
				CreationTimestamp: old,
			},
		},
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "new",
				CreationTimestamp: now,
			},
		},
	}
	filter := &andFilter{
		filterPredicates: []FilterPredicate{NewFilterBeforePredicate(youngerThan)},
	}
	result := filter.Filter(builds)
	if len(result) != 1 {
		t.Errorf("Unexpected number of results")
	}
	if expected, actual := "old", result[0].Name; expected != actual {
		t.Errorf("expected %v, actual %v", expected, actual)
	}
}
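// andFilter is not shown in this section; a definition consistent with its use
// in TestFilterBeforePredicate above would keep only the items satisfying
// every predicate, roughly as below. This is an assumed reconstruction, not
// the original source.
type andFilter struct {
	filterPredicates []FilterPredicate
}

// Filter returns the builds for which all of the filter's predicates hold.
func (a *andFilter) Filter(builds []*buildapi.Build) []*buildapi.Build {
	var result []*buildapi.Build
	for _, build := range builds {
		keep := true
		for _, predicate := range a.filterPredicates {
			if !predicate(build) {
				keep = false
				break
			}
		}
		if keep {
			result = append(result, build)
		}
	}
	return result
}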
// HandleBuildPodDeletion sets the status of a build to error if the build pod has been deleted
func (bc *BuildPodDeleteController) HandleBuildPodDeletion(pod *kapi.Pod) error {
	glog.V(4).Infof("Handling deletion of build pod %s/%s", pod.Namespace, pod.Name)
	obj, exists, err := bc.BuildStore.Get(buildKey(pod))
	if err != nil {
		glog.V(4).Infof("Error getting build for pod %s/%s", pod.Namespace, pod.Name)
		return err
	}
	if !exists || obj == nil {
		glog.V(5).Infof("No Build found for deleted pod %s/%s", pod.Namespace, pod.Name)
		return nil
	}
	build := obj.(*buildapi.Build)

	if buildutil.IsBuildComplete(build) {
		glog.V(4).Infof("Pod was deleted but Build %s/%s is already completed, so no need to update it.", build.Namespace, build.Name)
		return nil
	}

	nextStatus := buildapi.BuildPhaseError
	if build.Status.Phase != nextStatus {
		glog.V(4).Infof("Updating build %s/%s status %s -> %s", build.Namespace, build.Name, build.Status.Phase, nextStatus)
		build.Status.Phase = nextStatus
		build.Status.Message = "The Pod for this Build was deleted before the Build completed."
		now := util.Now()
		build.Status.CompletionTimestamp = &now
		if err := bc.BuildUpdater.Update(build.Namespace, build); err != nil {
			return fmt.Errorf("Failed to update Build %s/%s: %v", build.Namespace, build.Name, err)
		}
	}
	return nil
}
// addImageStreamsToGraph adds all the streams to the graph. The most recent n
// image revisions for a tag will be preserved, where n is specified by the
// algorithm's tagRevisionsToKeep. Image revisions older than n are candidates
// for pruning, provided the image stream's age is at least as old as the
// minimum threshold in algorithm. Otherwise, if the image stream is younger
// than the threshold, all image revisions for that stream are ineligible for
// pruning.
//
// addImageStreamsToGraph also adds references from each stream to all the
// layers it references (via each image a stream references).
func addImageStreamsToGraph(g graph.Graph, streams *imageapi.ImageStreamList, algorithm pruneAlgorithm) {
	for i := range streams.Items {
		stream := &streams.Items[i]

		glog.V(4).Infof("Examining ImageStream %s/%s", stream.Namespace, stream.Name)

		// use a weak reference for old image revisions by default
		oldImageRevisionReferenceKind := graph.WeakReferencedImageGraphEdgeKind

		age := util.Now().Sub(stream.CreationTimestamp.Time)
		if age < algorithm.keepYoungerThan {
			// stream's age is below threshold - use a strong reference for old image revisions instead
			glog.V(4).Infof("Stream %s/%s is below age threshold - none of its images are eligible for pruning", stream.Namespace, stream.Name)
			oldImageRevisionReferenceKind = graph.ReferencedImageGraphEdgeKind
		}

		glog.V(4).Infof("Adding ImageStream %s/%s to graph", stream.Namespace, stream.Name)
		isNode := graph.ImageStream(g, stream)
		imageStreamNode := isNode.(*graph.ImageStreamNode)

		for tag, history := range stream.Status.Tags {
			for i := range history.Items {
				n := graph.FindImage(g, history.Items[i].Image)
				if n == nil {
					glog.V(1).Infof("Unable to find image %q in graph (from tag=%q, revision=%d, dockerImageReference=%s)", history.Items[i].Image, tag, i, history.Items[i].DockerImageReference)
					continue
				}
				imageNode := n.(*graph.ImageNode)

				var kind int
				switch {
				case i < algorithm.tagRevisionsToKeep:
					kind = graph.ReferencedImageGraphEdgeKind
				default:
					kind = oldImageRevisionReferenceKind
				}

				glog.V(4).Infof("Checking for existing strong reference from stream %s/%s to image %s", stream.Namespace, stream.Name, imageNode.Image.Name)
				if edge := g.EdgeBetween(imageStreamNode, imageNode); edge != nil && g.EdgeKind(edge) == graph.ReferencedImageGraphEdgeKind {
					glog.V(4).Infof("Strong reference found")
					continue
				}

				glog.V(4).Infof("Adding edge (kind=%d) from %q to %q", kind, imageStreamNode.UniqueName.UniqueName(), imageNode.UniqueName.UniqueName())
				g.AddEdge(imageStreamNode, imageNode, kind)

				glog.V(4).Infof("Adding stream->layer references")
				// add stream -> layer references so we can prune them later
				for _, s := range g.Successors(imageNode) {
					if g.Kind(s) != graph.ImageLayerGraphKind {
						continue
					}
					glog.V(4).Infof("Adding reference from stream %q to layer %q", stream.Name, s.(*graph.ImageLayerNode).Layer)
					g.AddEdge(imageStreamNode, s, graph.ReferencedImageLayerGraphEdgeKind)
				}
			}
		}
	}
}
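// edgeKindForRevision restates the per-revision decision buried in the loop of
// addImageStreamsToGraph above as a pure function — a distilled illustration,
// not code from the pruner. The most recent tagRevisionsToKeep revisions are
// always strongly referenced; older revisions become weakly referenced
// (prunable) only once the stream is at least keepYoungerThan old.
func edgeKindForRevision(revisionIndex, tagRevisionsToKeep int, streamAge, keepYoungerThan time.Duration) int {
	if revisionIndex < tagRevisionsToKeep {
		return graph.ReferencedImageGraphEdgeKind
	}
	if streamAge < keepYoungerThan {
		return graph.ReferencedImageGraphEdgeKind
	}
	return graph.WeakReferencedImageGraphEdgeKind
}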
// checkNodeSchedulable checks node schedulable condition, without transition timestamp set.
func (s *NodeController) checkNodeSchedulable(node *api.Node) *api.NodeCondition {
	if node.Spec.Unschedulable {
		return &api.NodeCondition{
			Type:          api.NodeSchedulable,
			Status:        api.ConditionNone,
			Reason:        "User marked unschedulable during node create/update",
			LastProbeTime: util.Now(),
		}
	}
	return &api.NodeCondition{
		Type:          api.NodeSchedulable,
		Status:        api.ConditionFull,
		Reason:        "Node is schedulable by default",
		LastProbeTime: util.Now(),
	}
}
// getNextBuildNameFromBuild returns the name of the next build, derived from
// the given build's name with the current Unix timestamp appended at the end
// (replacing any previous timestamp suffix).
func getNextBuildNameFromBuild(build *buildapi.Build) string {
	buildName := build.Name
	if matched, _ := regexp.MatchString(`^.+-\d-\d+$`, buildName); matched {
		nameElems := strings.Split(buildName, "-")
		buildName = strings.Join(nameElems[:len(nameElems)-1], "-")
	}
	return fmt.Sprintf("%s-%d", buildName, int32(util.Now().Unix()))
}
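// Worked example for getNextBuildNameFromBuild (hypothetical names): a build
// named "sample-build-1-1432226756" matches ^.+-\d-\d+$, so the trailing
// timestamp is stripped and the result is "sample-build-1-<now>"; a build
// named "sample-build-1" does not match, so the result is simply
// "sample-build-1-<now>".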
func GetBootstrapPolicyBinding() *authorizationapi.ClusterPolicyBinding {
	policyBinding := &authorizationapi.ClusterPolicyBinding{
		ObjectMeta: kapi.ObjectMeta{
			Name:              ":Default",
			CreationTimestamp: util.Now(),
			UID:               util.NewUUID(),
		},
		LastModified: util.Now(),
		RoleBindings: make(map[string]*authorizationapi.ClusterRoleBinding),
	}

	bindings := bootstrappolicy.GetBootstrapClusterRoleBindings()
	for i := range bindings {
		policyBinding.RoleBindings[bindings[i].Name] = &bindings[i]
	}

	return policyBinding
}