Example #1
func (m *manager) SetPodStatus(pod *api.Pod, status api.PodStatus) {
	m.podStatusesLock.Lock()
	defer m.podStatusesLock.Unlock()

	var oldStatus api.PodStatus
	if cachedStatus, ok := m.podStatuses[pod.UID]; ok {
		oldStatus = cachedStatus.status
	} else if mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod); ok {
		oldStatus = mirrorPod.Status
	} else {
		oldStatus = pod.Status
	}

	// Set ReadyCondition.LastTransitionTime.
	if readyCondition := api.GetPodReadyCondition(status); readyCondition != nil {
		// Need to set LastTransitionTime.
		lastTransitionTime := unversioned.Now()
		oldReadyCondition := api.GetPodReadyCondition(oldStatus)
		if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
			lastTransitionTime = oldReadyCondition.LastTransitionTime
		}
		readyCondition.LastTransitionTime = lastTransitionTime
	}

	// ensure that the start time does not change across updates.
	if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() {
		status.StartTime = oldStatus.StartTime
	} else if status.StartTime.IsZero() {
		// if the status has no start time, we need to set an initial time
		now := unversioned.Now()
		status.StartTime = &now
	}

	m.updateStatusInternal(pod, status)
}
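Example #1 (like #4, #11, and #14 further down) applies a single rule to ReadyCondition: keep the old LastTransitionTime while the condition's status is unchanged, and stamp the current time only on a real transition. A minimal, self-contained sketch of that rule, assuming only the standard library (unversioned.Time is a thin wrapper around time.Time; Condition here is a simplified stand-in for api.PodCondition):

package main

import (
	"fmt"
	"time"
)

// Condition is a simplified stand-in for api.PodCondition.
type Condition struct {
	Status             string
	LastTransitionTime time.Time
}

// setTransitionTime keeps the old transition time when the status is
// unchanged and stamps the current time only on a real transition,
// mirroring the ReadyCondition handling above.
func setTransitionTime(newCond, oldCond *Condition) {
	lastTransitionTime := time.Now()
	if oldCond != nil && newCond.Status == oldCond.Status {
		lastTransitionTime = oldCond.LastTransitionTime
	}
	newCond.LastTransitionTime = lastTransitionTime
}

func main() {
	old := &Condition{Status: "True", LastTransitionTime: time.Now().Add(-time.Hour)}
	cur := &Condition{Status: "True"}
	setTransitionTime(cur, old)
	fmt.Println(cur.LastTransitionTime.Equal(old.LastTransitionTime)) // true: no transition happened
}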
Example #2
func tagReferenceToTagEvent(stream *api.ImageStream, tagRef api.TagReference, tagOrID string) (*api.TagEvent, error) {
	switch tagRef.From.Kind {
	case "DockerImage":
		return &api.TagEvent{
			Created:              unversioned.Now(),
			DockerImageReference: tagRef.From.Name,
		}, nil

	case "ImageStreamImage":
		ref, err := api.DockerImageReferenceForStream(stream)
		if err != nil {
			return nil, err
		}

		resolvedIDs := api.ResolveImageID(stream, tagOrID)
		switch len(resolvedIDs) {
		case 1:
			ref.ID = resolvedIDs.List()[0]
			return &api.TagEvent{
				Created:              unversioned.Now(),
				DockerImageReference: ref.String(),
				Image:                ref.ID,
			}, nil
		case 0:
			return nil, fmt.Errorf("no images match the prefix %q", tagOrID)
		default:
			return nil, fmt.Errorf("multiple images match the prefix %q: %s", tagOrID, strings.Join(resolvedIDs.List(), ", "))
		}
	case "ImageStreamTag":
		return api.LatestTaggedImage(stream, tagOrID), nil
	default:
		return nil, fmt.Errorf("invalid from.kind %q: it must be ImageStreamImage or ImageStreamTag", tagRef.From.Kind)
	}
}
Example #3
func newCondition() experimental.JobCondition {
	return experimental.JobCondition{
		Type:               experimental.JobComplete,
		Status:             api.ConditionTrue,
		LastProbeTime:      unversioned.Now(),
		LastTransitionTime: unversioned.Now(),
	}
}
Example #4
// updateStatusInternal updates the internal status cache, and queues an update to the api server if
// necessary. Returns whether an update was triggered.
// This method IS NOT THREAD SAFE and must be called from a locked function.
func (m *manager) updateStatusInternal(pod *api.Pod, status api.PodStatus, forceUpdate bool) bool {
	var oldStatus api.PodStatus
	cachedStatus, isCached := m.podStatuses[pod.UID]
	if isCached {
		oldStatus = cachedStatus.status
	} else if mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod); ok {
		oldStatus = mirrorPod.Status
	} else {
		oldStatus = pod.Status
	}

	// Set ReadyCondition.LastTransitionTime.
	if readyCondition := api.GetPodReadyCondition(status); readyCondition != nil {
		// Need to set LastTransitionTime.
		lastTransitionTime := unversioned.Now()
		oldReadyCondition := api.GetPodReadyCondition(oldStatus)
		if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
			lastTransitionTime = oldReadyCondition.LastTransitionTime
		}
		readyCondition.LastTransitionTime = lastTransitionTime
	}

	// ensure that the start time does not change across updates.
	if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() {
		status.StartTime = oldStatus.StartTime
	} else if status.StartTime.IsZero() {
		// if the status has no start time, we need to set an initial time
		now := unversioned.Now()
		status.StartTime = &now
	}

	normalizeStatus(&status)
	// The intent here is to prevent concurrent updates to a pod's status from
	// clobbering each other so the phase of a pod progresses monotonically.
	if isCached && isStatusEqual(&cachedStatus.status, &status) && !forceUpdate {
		glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", format.Pod(pod), status)
		return false // No new status.
	}

	newStatus := versionedPodStatus{
		status:       status,
		version:      cachedStatus.version + 1,
		podName:      pod.Name,
		podNamespace: pod.Namespace,
	}
	m.podStatuses[pod.UID] = newStatus

	select {
	case m.podStatusChannel <- podStatusSyncRequest{pod.UID, newStatus}:
		return true
	default:
		// Let the periodic syncBatch handle the update if the channel is full.
		// We can't block, since we hold the mutex lock.
		glog.V(4).Infof("Skpping the status update for pod %q for now because the channel is full; status: %+v",
			format.Pod(pod), status)
		return false
	}
}
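The select/default at the end of Example #4 is a non-blocking send: the caller holds a mutex, so instead of waiting on a full channel it drops the request and lets the periodic syncBatch catch up. A minimal sketch of that pattern (channel size and message type are illustrative):

package main

import "fmt"

func main() {
	updates := make(chan string, 1) // small buffer, like podStatusChannel

	trySend := func(msg string) bool {
		select {
		case updates <- msg:
			return true // queued for the consumer
		default:
			return false // buffer full; a periodic sync will pick it up later
		}
	}

	fmt.Println(trySend("status-1")) // true
	fmt.Println(trySend("status-2")) // false: channel is full and we must not block
}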
Example #5
// HandlePod updates the state of the build based on the pod state
func (bc *BuildPodController) HandlePod(pod *kapi.Pod) error {
	obj, exists, err := bc.BuildStore.Get(buildKey(pod))
	if err != nil {
		glog.V(4).Infof("Error getting build for pod %s/%s: %v", pod.Namespace, pod.Name, err)
		return err
	}
	if !exists || obj == nil {
		glog.V(5).Infof("No build found for pod %s/%s", pod.Namespace, pod.Name)
		return nil
	}

	build := obj.(*buildapi.Build)

	nextStatus := build.Status.Phase
	switch pod.Status.Phase {
	case kapi.PodRunning:
		// The pod's still running
		nextStatus = buildapi.BuildPhaseRunning
	case kapi.PodSucceeded:
		// Check the exit codes of all the containers in the pod
		nextStatus = buildapi.BuildPhaseComplete
		if len(pod.Status.ContainerStatuses) == 0 {
			// no containers in the pod means something went badly wrong, so the build
			// should be failed.
			glog.V(2).Infof("Failing build %s/%s because the pod has no containers", build.Namespace, build.Name)
			nextStatus = buildapi.BuildPhaseFailed
		} else {
			for _, info := range pod.Status.ContainerStatuses {
				if info.State.Terminated != nil && info.State.Terminated.ExitCode != 0 {
					nextStatus = buildapi.BuildPhaseFailed
					break
				}
			}
		}
	case kapi.PodFailed:
		nextStatus = buildapi.BuildPhaseFailed
	}

	if build.Status.Phase != nextStatus && !buildutil.IsBuildComplete(build) {
		previousPhase := build.Status.Phase
		glog.V(4).Infof("Updating build %s/%s status %s -> %s", build.Namespace, build.Name, previousPhase, nextStatus)
		build.Status.Phase = nextStatus
		build.Status.Reason = ""
		build.Status.Message = ""
		if buildutil.IsBuildComplete(build) {
			now := unversioned.Now()
			build.Status.CompletionTimestamp = &now
		}
		if build.Status.Phase == buildapi.BuildPhaseRunning {
			now := unversioned.Now()
			build.Status.StartTimestamp = &now
		}
		if err := bc.BuildUpdater.Update(build.Namespace, build); err != nil {
			return fmt.Errorf("failed to update build %s/%s: %v", build.Namespace, build.Name, err)
		}
		glog.V(4).Infof("Build %s/%s status was updated %s -> %s", build.Namespace, build.Name, build.Status.Phase, nextStatus)
	}
	return nil
}
Example #6
func newCondition(conditionType extensions.JobConditionType, reason, message string) extensions.JobCondition {
	return extensions.JobCondition{
		Type:               conditionType,
		Status:             api.ConditionTrue,
		LastProbeTime:      unversioned.Now(),
		LastTransitionTime: unversioned.Now(),
		Reason:             reason,
		Message:            message,
	}
}
Example #7
func NewEmptyPolicy(namespace string) *authorizationapi.Policy {
	policy := &authorizationapi.Policy{}
	policy.Name = authorizationapi.PolicyName
	policy.Namespace = namespace
	policy.CreationTimestamp = unversioned.Now()
	policy.LastModified = unversioned.Now()
	policy.Roles = make(map[string]*authorizationapi.Role)

	return policy
}
Example #8
// NewDeploymentCondition creates a new deployment condition.
func NewDeploymentCondition(condType extensions.DeploymentConditionType, status api.ConditionStatus, reason, message string) *extensions.DeploymentCondition {
	return &extensions.DeploymentCondition{
		Type:               condType,
		Status:             status,
		LastUpdateTime:     unversioned.Now(),
		LastTransitionTime: unversioned.Now(),
		Reason:             reason,
		Message:            message,
	}
}
Example #9
func newCondition(conditionType batch.JobConditionType, reason, message string) batch.JobCondition {
	return batch.JobCondition{
		Type:               conditionType,
		Status:             v1.ConditionTrue,
		LastProbeTime:      unversioned.Now(),
		LastTransitionTime: unversioned.Now(),
		Reason:             reason,
		Message:            message,
	}
}
Example #10
// BeforeDelete tests whether the object can be gracefully deleted. If graceful is set, the object
// should be gracefully deleted; if gracefulPending is set, the object has already been gracefully deleted
// (and the provided grace period is longer than the time to deletion). An error is returned if the
// condition cannot be checked or the gracePeriodSeconds is invalid. The options argument may be updated with
// default values if graceful is true. The other place where we set DeletionTimestamp is
// pkg/registry/generic/registry/store.go: this function sets it during graceful deletion, the other during cascading deletions.
func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Object, options *api.DeleteOptions) (graceful, gracefulPending bool, err error) {
	objectMeta, gvk, kerr := objectMetaAndKind(strategy, obj)
	if kerr != nil {
		return false, false, kerr
	}
	// Checking the Preconditions here to fail early. They'll be enforced later on when we actually do the deletion, too.
	if options.Preconditions != nil && options.Preconditions.UID != nil && *options.Preconditions.UID != objectMeta.UID {
		return false, false, errors.NewConflict(unversioned.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, objectMeta.Name, fmt.Errorf("the UID in the precondition (%s) does not match the UID in record (%s). The object might have been deleted and then recreated", *options.Preconditions.UID, objectMeta.UID))
	}
	gracefulStrategy, ok := strategy.(RESTGracefulDeleteStrategy)
	if !ok {
		// If we're not deleting gracefully there's no point in updating Generation, as we won't update
		// the object before deleting it.
		return false, false, nil
	}
	// if the object is already being deleted, no need to update generation.
	if objectMeta.DeletionTimestamp != nil {
		// if we are already being deleted, we may only shorten the deletion grace period
		// this means the object was gracefully deleted previously but deletionGracePeriodSeconds was not set,
		// so we force deletion immediately
		if objectMeta.DeletionGracePeriodSeconds == nil {
			return false, false, nil
		}
		// only a shorter grace period may be provided by a user
		if options.GracePeriodSeconds != nil {
			period := int64(*options.GracePeriodSeconds)
			if period > *objectMeta.DeletionGracePeriodSeconds {
				return false, true, nil
			}
			now := unversioned.NewTime(unversioned.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds)))
			objectMeta.DeletionTimestamp = &now
			objectMeta.DeletionGracePeriodSeconds = &period
			options.GracePeriodSeconds = &period
			return true, false, nil
		}
		// graceful deletion is pending, do nothing
		options.GracePeriodSeconds = objectMeta.DeletionGracePeriodSeconds
		return false, true, nil
	}

	if !gracefulStrategy.CheckGracefulDelete(obj, options) {
		return false, false, nil
	}
	now := unversioned.NewTime(unversioned.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds)))
	objectMeta.DeletionTimestamp = &now
	objectMeta.DeletionGracePeriodSeconds = options.GracePeriodSeconds
	// If it's the first graceful deletion we are going to set the DeletionTimestamp to non-nil.
	// Controllers of the object that's being deleted shouldn't take any nontrivial actions, hence its behavior changes.
	// Thus we need to bump object's Generation (if set). This handles generation bump during graceful deletion.
	// The bump for objects that don't support graceful deletion is handled in pkg/registry/generic/registry/store.go.
	if objectMeta.Generation > 0 {
		objectMeta.Generation++
	}
	return true, false, nil
}
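The grace-period handling in Example #10 reduces to two rules: DeletionTimestamp is the current time plus the requested grace period, and a pending graceful deletion may only be shortened, never extended. A hedged sketch of just those rules with plain time.Time values (names are simplified stand-ins for the ObjectMeta fields):

package main

import (
	"fmt"
	"time"
)

// shortenGracePeriod mirrors the "only a shorter grace period may be provided"
// branch: it returns a new deletion timestamp when the request shortens the
// pending period, and ok=false when the request would extend it.
func shortenGracePeriod(pendingSeconds, requestedSeconds int64) (time.Time, bool) {
	if requestedSeconds > pendingSeconds {
		return time.Time{}, false // keep the pending deletion as-is
	}
	return time.Now().Add(time.Second * time.Duration(requestedSeconds)), true
}

func main() {
	if ts, ok := shortenGracePeriod(30, 5); ok {
		fmt.Println("deletion moved up to:", ts.Format(time.RFC3339))
	}
	_, ok := shortenGracePeriod(30, 60)
	fmt.Println("extending the grace period allowed:", ok) // false
}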
Example #11
func (m *manager) SetPodStatus(pod *api.Pod, status api.PodStatus) {
	m.podStatusesLock.Lock()
	defer m.podStatusesLock.Unlock()
	oldStatus, found := m.podStatuses[pod.UID]

	// ensure that the start time does not change across updates.
	if found && oldStatus.StartTime != nil {
		status.StartTime = oldStatus.StartTime
	}

	// Set ReadyCondition.LastTransitionTime.
	// Note we cannot do this while generating the status since we do not have oldStatus
	// at that time for mirror pods.
	if readyCondition := api.GetPodReadyCondition(status); readyCondition != nil {
		// Need to set LastTransitionTime.
		lastTransitionTime := unversioned.Now()
		if found {
			oldReadyCondition := api.GetPodReadyCondition(oldStatus)
			if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
				lastTransitionTime = oldReadyCondition.LastTransitionTime
			}
		}
		readyCondition.LastTransitionTime = lastTransitionTime
	}

	// if the status has no start time, we need to set an initial time
	// TODO(yujuhong): Consider setting StartTime when generating the pod
	// status instead, which would allow manager to become a simple cache
	// again.
	if status.StartTime.IsZero() {
		if pod.Status.StartTime.IsZero() {
			// the pod did not have a previously recorded value so set to now
			now := unversioned.Now()
			status.StartTime = &now
		} else {
			// the pod had a recorded value, but the kubelet restarted so we need to rebuild cache
			// based on last observed value
			status.StartTime = pod.Status.StartTime
		}
	}

	// TODO: Holding a lock during blocking operations is dangerous. Refactor so this isn't necessary.
	// The intent here is to prevent concurrent updates to a pod's status from
	// clobbering each other so the phase of a pod progresses monotonically.
	// Currently this routine is not called for the same pod from multiple
	// workers and/or the kubelet but dropping the lock before sending the
	// status down the channel feels like an easy way to get a bullet in the foot.
	if !found || !isStatusEqual(&oldStatus, &status) || pod.DeletionTimestamp != nil {
		m.podStatuses[pod.UID] = status
		m.podStatusChannel <- podStatusSyncRequest{pod, status}
	} else {
		glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", kubeletUtil.FormatPodName(pod), status)
	}
}
Example #12
func NewEmptyPolicyBinding(namespace, policyNamespace, policyBindingName string) *authorizationapi.PolicyBinding {
	binding := &authorizationapi.PolicyBinding{}
	binding.Name = policyBindingName
	binding.Namespace = namespace
	binding.CreationTimestamp = unversioned.Now()
	binding.LastModified = unversioned.Now()
	binding.PolicyRef = kapi.ObjectReference{Name: authorizationapi.PolicyName, Namespace: policyNamespace}
	binding.RoleBindings = make(map[string]*authorizationapi.RoleBinding)

	return binding
}
Example #13
// createBatchPodSequential creates pods back-to-back in sequence.
func createBatchPodSequential(f *framework.Framework, pods []*api.Pod) (time.Duration, []framework.PodLatencyData) {
	batchStartTime := unversioned.Now()
	e2eLags := make([]framework.PodLatencyData, 0)
	for _, pod := range pods {
		create := unversioned.Now()
		f.PodClient().CreateSync(pod)
		e2eLags = append(e2eLags,
			framework.PodLatencyData{Name: pod.Name, Latency: unversioned.Now().Time.Sub(create.Time)})
	}
	batchLag := unversioned.Now().Time.Sub(batchStartTime.Time)
	sort.Sort(framework.LatencySlice(e2eLags))
	return batchLag, e2eLags
}
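Example #13 uses unversioned.Now() purely as a stopwatch, and since unversioned.Time embeds time.Time the measurement pattern is identical with the standard library. A minimal sketch (doWork is a hypothetical placeholder for the synchronous pod-creation call):

package main

import (
	"fmt"
	"sort"
	"time"
)

// doWork is a hypothetical stand-in for f.PodClient().CreateSync(pod).
func doWork(i int) { time.Sleep(time.Duration(i) * time.Millisecond) }

func main() {
	batchStart := time.Now()
	lags := make([]time.Duration, 0, 3)
	for i := 1; i <= 3; i++ {
		start := time.Now()
		doWork(i)
		lags = append(lags, time.Since(start)) // per-item e2e latency
	}
	batchLag := time.Since(batchStart)
	sort.Slice(lags, func(a, b int) bool { return lags[a] < lags[b] })
	fmt.Println("batch:", batchLag, "per item:", lags)
}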
Example #14
func (m *manager) SetPodStatus(pod *api.Pod, status api.PodStatus) {
	m.podStatusesLock.Lock()
	defer m.podStatusesLock.Unlock()
	oldStatus, found := m.podStatuses[pod.UID]

	// ensure that the start time does not change across updates.
	if found && oldStatus.status.StartTime != nil {
		status.StartTime = oldStatus.status.StartTime
	}

	// Set ReadyCondition.LastTransitionTime.
	// Note we cannot do this while generating the status since we do not have oldStatus
	// at that time for mirror pods.
	if readyCondition := api.GetPodReadyCondition(status); readyCondition != nil {
		// Need to set LastTransitionTime.
		lastTransitionTime := unversioned.Now()
		if found {
			oldReadyCondition := api.GetPodReadyCondition(oldStatus.status)
			if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
				lastTransitionTime = oldReadyCondition.LastTransitionTime
			}
		}
		readyCondition.LastTransitionTime = lastTransitionTime
	}

	// if the status has no start time, we need to set an initial time
	// TODO(yujuhong): Consider setting StartTime when generating the pod
	// status instead, which would allow manager to become a simple cache
	// again.
	if status.StartTime.IsZero() {
		if pod.Status.StartTime.IsZero() {
			// the pod did not have a previously recorded value so set to now
			now := unversioned.Now()
			status.StartTime = &now
		} else {
			// the pod had a recorded value, but the kubelet restarted so we need to rebuild cache
			// based on last observed value
			status.StartTime = pod.Status.StartTime
		}
	}

	newStatus := m.updateStatusInternal(pod, status)
	if newStatus != nil {
		select {
		case m.podStatusChannel <- podStatusSyncRequest{pod.UID, *newStatus}:
		default:
			// Let the periodic syncBatch handle the update if the channel is full.
			// We can't block, since we hold the mutex lock.
		}
	}
}
Example #15
func (m *VirtualStorage) createRole(ctx kapi.Context, obj runtime.Object, allowEscalation bool) (*authorizationapi.Role, error) {
	if err := rest.BeforeCreate(m.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}

	role := obj.(*authorizationapi.Role)
	if !allowEscalation {
		if err := rulevalidation.ConfirmNoEscalation(ctx, authorizationapi.Resource("role"), role.Name, m.RuleResolver, authorizationinterfaces.NewLocalRoleAdapter(role)); err != nil {
			return nil, err
		}
	}

	policy, err := m.EnsurePolicy(ctx)
	if err != nil {
		return nil, err
	}
	if _, exists := policy.Roles[role.Name]; exists {
		return nil, kapierrors.NewAlreadyExists(authorizationapi.Resource("role"), role.Name)
	}

	role.ResourceVersion = policy.ResourceVersion
	policy.Roles[role.Name] = role
	policy.LastModified = unversioned.Now()

	if err := m.PolicyStorage.UpdatePolicy(ctx, policy); err != nil {
		return nil, err
	}

	return role, nil
}
Example #16
func deletePods(kubeClient client.Interface, ns string, before unversioned.Time) (int64, error) {
	items, err := kubeClient.Pods(ns).List(unversioned.ListOptions{})
	if err != nil {
		return 0, err
	}
	expired := unversioned.Now().After(before.Time)
	var deleteOptions *api.DeleteOptions
	if expired {
		deleteOptions = api.NewDeleteOptions(0)
	}
	estimate := int64(0)
	for i := range items.Items {
		if items.Items[i].Spec.TerminationGracePeriodSeconds != nil {
			grace := *items.Items[i].Spec.TerminationGracePeriodSeconds
			if grace > estimate {
				estimate = grace
			}
		}
		err := kubeClient.Pods(ns).Delete(items.Items[i].Name, deleteOptions)
		if err != nil && !errors.IsNotFound(err) {
			return 0, err
		}
	}
	if expired {
		estimate = 0
	}
	return estimate, nil
}
Example #17
func TestDoNotDeleteMirrorPods(t *testing.T) {
	staticPod := getTestPod()
	staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
	mirrorPod := getTestPod()
	mirrorPod.UID = "mirror-12345678"
	mirrorPod.Annotations = map[string]string{
		kubetypes.ConfigSourceAnnotationKey: "api",
		kubetypes.ConfigMirrorAnnotationKey: "mirror",
	}
	// Set the deletion timestamp.
	mirrorPod.DeletionTimestamp = new(unversioned.Time)
	client := fake.NewSimpleClientset(mirrorPod)
	m := newTestManager(client)
	m.podManager.AddPod(staticPod)
	m.podManager.AddPod(mirrorPod)
	// Verify setup.
	assert.True(t, kubepod.IsStaticPod(staticPod), "SetUp error: staticPod")
	assert.True(t, kubepod.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
	assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), staticPod.UID)

	status := getRandomPodStatus()
	now := unversioned.Now()
	status.StartTime = &now
	m.SetPodStatus(staticPod, status)

	m.testSyncBatch()
	// Expect not to see a delete action.
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
	})
}
Example #18
func TestStaticPodStatus(t *testing.T) {
	staticPod := getTestPod()
	staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
	mirrorPod := getTestPod()
	mirrorPod.UID = "mirror-12345678"
	mirrorPod.Annotations = map[string]string{
		kubetypes.ConfigSourceAnnotationKey: "api",
		kubetypes.ConfigMirrorAnnotationKey: "mirror",
	}
	client := fake.NewSimpleClientset(mirrorPod)
	m := newTestManager(client)
	m.podManager.AddPod(staticPod)
	m.podManager.AddPod(mirrorPod)
	// Verify setup.
	assert.True(t, kubepod.IsStaticPod(staticPod), "SetUp error: staticPod")
	assert.True(t, kubepod.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
	assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), staticPod.UID)

	status := getRandomPodStatus()
	now := unversioned.Now()
	status.StartTime = &now

	m.SetPodStatus(staticPod, status)
	retrievedStatus := expectPodStatus(t, m, staticPod)
	normalizeStatus(&status)
	assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)
	retrievedStatus, _ = m.GetPodStatus(mirrorPod.UID)
	assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)
	// Should translate mirrorPod / staticPod UID.
	m.testSyncBatch()
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
	})
	updateAction := client.Actions()[1].(core.UpdateActionImpl)
	updatedPod := updateAction.Object.(*api.Pod)
	assert.Equal(t, mirrorPod.UID, updatedPod.UID, "Expected mirrorPod (%q), but got %q", mirrorPod.UID, updatedPod.UID)
	assert.True(t, isStatusEqual(&status, &updatedPod.Status), "Expected: %+v, Got: %+v", status, updatedPod.Status)
	client.ClearActions()

	// No changes.
	m.testSyncBatch()
	verifyActions(t, m.kubeClient, []core.Action{})

	// Mirror pod identity changes.
	m.podManager.DeletePod(mirrorPod)
	mirrorPod.UID = "new-mirror-pod"
	mirrorPod.Status = api.PodStatus{}
	m.podManager.AddPod(mirrorPod)
	// Expect update to new mirrorPod.
	m.testSyncBatch()
	verifyActions(t, m.kubeClient, []core.Action{
		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
	})
	updateAction = client.Actions()[1].(core.UpdateActionImpl)
	updatedPod = updateAction.Object.(*api.Pod)
	assert.Equal(t, mirrorPod.UID, updatedPod.UID, "Expected mirrorPod (%q), but got %q", mirrorPod.UID, updatedPod.UID)
	assert.True(t, isStatusEqual(&status, &updatedPod.Status), "Expected: %+v, Got: %+v", status, updatedPod.Status)
}
Example #19
func TestSyncPastDeadlineJobFinished(t *testing.T) {
	clientset := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.podStoreSynced = alwaysReady
	var actual *extensions.Job
	manager.updateHandler = func(job *extensions.Job) error {
		actual = job
		return nil
	}

	job := newJob(1, 1)
	activeDeadlineSeconds := int64(10)
	job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
	start := unversioned.Unix(unversioned.Now().Time.Unix()-15, 0)
	job.Status.StartTime = &start
	job.Status.Conditions = append(job.Status.Conditions, newCondition(extensions.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
	manager.jobStore.Store.Add(job)
	err := manager.syncJob(getKey(job, t))
	if err != nil {
		t.Errorf("Unexpected error when syncing jobs %v", err)
	}
	if len(fakePodControl.Templates) != 0 {
		t.Errorf("Unexpected number of creates.  Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
	}
	if len(fakePodControl.DeletePodName) != 0 {
		t.Errorf("Unexpected number of deletes.  Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName))
	}
	if actual != nil {
		t.Error("Unexpected job modification")
	}
}
Example #20
// UpdatePodCondition updates an existing pod condition or adds a new one. It sets
// LastTransitionTime to now if the status has changed.
// Returns true if the pod condition has changed or has been added.
func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool {
	condition.LastTransitionTime = unversioned.Now()
	// Try to find this pod condition.
	conditionIndex, oldCondition := GetPodCondition(status, condition.Type)

	if oldCondition == nil {
		// We are adding new pod condition.
		status.Conditions = append(status.Conditions, *condition)
		return true
	} else {
		// We are updating an existing condition, so we need to check if it has changed.
		if condition.Status == oldCondition.Status {
			condition.LastTransitionTime = oldCondition.LastTransitionTime
		}

		isEqual := condition.Status == oldCondition.Status &&
			condition.Reason == oldCondition.Reason &&
			condition.Message == oldCondition.Message &&
			condition.LastProbeTime.Equal(oldCondition.LastProbeTime) &&
			condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime)

		status.Conditions[conditionIndex] = *condition
		// Return true if one of the fields has changed.
		return !isEqual
	}
}
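Callers typically use the boolean returned by UpdatePodCondition to decide whether a status update is worth pushing to the API server. A self-contained sketch of the same contract with simplified stand-in types: find-or-append by condition type, preserve the transition time when the status is unchanged, and report whether anything changed:

package main

import (
	"fmt"
	"time"
)

// Condition is a simplified stand-in for api.PodCondition.
type Condition struct {
	Type, Status, Reason string
	LastTransitionTime   time.Time
}

// updateCondition follows the contract above: find-or-append by Type,
// preserve LastTransitionTime when Status is unchanged, and report
// whether anything actually changed.
func updateCondition(conds *[]Condition, c Condition) bool {
	c.LastTransitionTime = time.Now()
	for i, old := range *conds {
		if old.Type != c.Type {
			continue
		}
		if c.Status == old.Status {
			c.LastTransitionTime = old.LastTransitionTime
		}
		changed := c.Status != old.Status ||
			c.Reason != old.Reason ||
			!c.LastTransitionTime.Equal(old.LastTransitionTime)
		(*conds)[i] = c
		return changed
	}
	*conds = append(*conds, c) // new condition type
	return true
}

func main() {
	var conds []Condition
	fmt.Println(updateCondition(&conds, Condition{Type: "Ready", Status: "True"}))  // true: added
	fmt.Println(updateCondition(&conds, Condition{Type: "Ready", Status: "True"}))  // false: nothing changed
	fmt.Println(updateCondition(&conds, Condition{Type: "Ready", Status: "False"})) // true: status transition
}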
Example #21
func TestDeleteNetwork(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	key := etcdtest.AddPrefix("networks/foo")
	ctx := api.NewContext()
	now := unversioned.Now()
	net := &api.Network{
		ObjectMeta: api.ObjectMeta{
			Name:              "foo",
			DeletionTimestamp: &now,
		},
		Spec: api.NetworkSpec{
			TenantID: "123",
			Subnets: map[string]api.Subnet{
				"subnet1": {
					CIDR:    "192.168.0.0/24",
					Gateway: "192.168.0.1",
				},
			},
		},
		Status: api.NetworkStatus{Phase: api.NetworkInitializing},
	}
	if err := storage.Storage.Set(ctx, key, net, nil, 0); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if _, err := storage.Delete(ctx, "foo", nil); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}
Example #22
// readyPods returns the old and new ready counts for their pods.
// If a pod is observed as being ready, it's considered ready even
// if it later becomes not ready.
func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController, minReadySeconds int32) (int32, int32, error) {
	controllers := []*api.ReplicationController{oldRc, newRc}
	oldReady := int32(0)
	newReady := int32(0)
	if r.nowFn == nil {
		r.nowFn = func() unversioned.Time { return unversioned.Now() }
	}

	for i := range controllers {
		controller := controllers[i]
		selector := labels.Set(controller.Spec.Selector).AsSelector()
		options := api.ListOptions{LabelSelector: selector}
		pods, err := r.c.Pods(controller.Namespace).List(options)
		if err != nil {
			return 0, 0, err
		}
		for _, pod := range pods.Items {
			if !deployment.IsPodAvailable(&pod, minReadySeconds, r.nowFn().Time) {
				continue
			}
			switch controller.Name {
			case oldRc.Name:
				oldReady++
			case newRc.Name:
				newReady++
			}
		}
	}
	return oldReady, newReady, nil
}
Example #23
// NewFilterBeforePredicate returns a predicate that is true if the replication controller was created before the current time minus the specified duration
func NewFilterBeforePredicate(d time.Duration) FilterPredicate {
	now := unversioned.Now()
	before := unversioned.NewTime(now.Time.Add(-1 * d))
	return func(item *kapi.ReplicationController) bool {
		return item.CreationTimestamp.Before(before)
	}
}
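Note that the predicate in Example #23 (and its twin in #26) closes over a cutoff computed once, when the predicate is built; every later call compares against that same instant rather than re-reading the clock. A self-contained sketch of the cutoff logic with plain time.Time values (the item type is a simplified stand-in):

package main

import (
	"fmt"
	"time"
)

type item struct {
	Name              string
	CreationTimestamp time.Time
}

// newFilterBeforePredicate fixes the cutoff at construction time, so every
// later call compares against the same instant.
func newFilterBeforePredicate(d time.Duration) func(item) bool {
	before := time.Now().Add(-d)
	return func(it item) bool { return it.CreationTimestamp.Before(before) }
}

func main() {
	olderThanHour := newFilterBeforePredicate(time.Hour)
	it := item{Name: "old-rc", CreationTimestamp: time.Now().Add(-2 * time.Hour)}
	fmt.Println(olderThanHour(it)) // true
}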
Example #24
func TestNamespaceStatusStrategy(t *testing.T) {
	ctx := api.NewDefaultContext()
	if StatusStrategy.NamespaceScoped() {
		t.Errorf("Namespaces should not be namespace scoped")
	}
	if StatusStrategy.AllowCreateOnUpdate() {
		t.Errorf("Namespaces should not allow create on update")
	}
	now := unversioned.Now()
	oldNamespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "10", DeletionTimestamp: &now},
		Spec:       api.NamespaceSpec{Finalizers: []api.FinalizerName{"kubernetes"}},
		Status:     api.NamespaceStatus{Phase: api.NamespaceActive},
	}
	namespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "9", DeletionTimestamp: &now},
		Status:     api.NamespaceStatus{Phase: api.NamespaceTerminating},
	}
	StatusStrategy.PrepareForUpdate(ctx, namespace, oldNamespace)
	if namespace.Status.Phase != api.NamespaceTerminating {
		t.Errorf("Namespace status updates should allow change of phase: %v", namespace.Status.Phase)
	}
	if len(namespace.Spec.Finalizers) != 1 || namespace.Spec.Finalizers[0] != api.FinalizerKubernetes {
		t.Errorf("PrepareForUpdate should have preserved old finalizers")
	}
	errs := StatusStrategy.ValidateUpdate(ctx, namespace, oldNamespace)
	if len(errs) != 0 {
		t.Errorf("Unexpected error %v", errs)
	}
	if namespace.ResourceVersion != "9" {
		t.Errorf("Incoming resource version on update should not be mutated")
	}
}
Example #25
func sizedImage(id, ref string, size int64) imageapi.Image {
	image := imageWithLayers(id, ref, false, layer1, layer2, layer3, layer4, layer5)
	image.CreationTimestamp = unversioned.NewTime(unversioned.Now().Add(time.Duration(-1) * time.Minute))
	image.DockerImageMetadata.Size = size

	return image
}
Example #26
// NewFilterBeforePredicate returns a predicate that is true if the build was created before the current time minus the specified duration
func NewFilterBeforePredicate(d time.Duration) FilterPredicate {
	now := unversioned.Now()
	before := unversioned.NewTime(now.Time.Add(-1 * d))
	return func(build *buildapi.Build) bool {
		return build.CreationTimestamp.Before(before)
	}
}
Example #27
// CancelBuild updates a build status to Cancelled, after its associated pod is deleted.
func (bc *BuildController) CancelBuild(build *buildapi.Build) error {
	if !isBuildCancellable(build) {
		glog.V(4).Infof("Build %s/%s can be cancelled only if it has pending/running status, not %s.", build.Namespace, build.Name, build.Status.Phase)
		return nil
	}

	glog.V(4).Infof("Cancelling build %s/%s.", build.Namespace, build.Name)

	pod, err := bc.PodManager.GetPod(build.Namespace, buildapi.GetBuildPodName(build))
	if err != nil {
		if !errors.IsNotFound(err) {
			return fmt.Errorf("Failed to get pod for build %s/%s: %v", build.Namespace, build.Name, err)
		}
	} else {
		err := bc.PodManager.DeletePod(build.Namespace, pod)
		if err != nil && !errors.IsNotFound(err) {
			return fmt.Errorf("Couldn't delete build pod %s/%s: %v", build.Namespace, pod.Name, err)
		}
	}

	build.Status.Phase = buildapi.BuildPhaseCancelled
	build.Status.Reason = ""
	build.Status.Message = ""
	now := unversioned.Now()
	build.Status.CompletionTimestamp = &now
	if err := bc.BuildUpdater.Update(build.Namespace, build); err != nil {
		return fmt.Errorf("Failed to update build %s/%s: %v", build.Namespace, build.Name, err)
	}

	glog.V(4).Infof("Build %s/%s was successfully cancelled.", build.Namespace, build.Name)
	return nil
}
Example #28
// Because openshift-sdn uses an overlay and doesn't need GCE Routes, we need to
// clear the NetworkUnavailable condition that kubelet adds to initial node
// status when using GCE.
// TODO: make upstream kubelet more flexible with overlays and GCE so this
// condition doesn't get added for network plugins that don't want it, and then
// we can remove this function.
func (master *OsdnMaster) clearInitialNodeNetworkUnavailableCondition(node *kapi.Node) {
	knode := node
	cleared := false
	resultErr := kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
		var err error

		if knode != node {
			knode, err = master.kClient.Nodes().Get(node.ObjectMeta.Name)
			if err != nil {
				return err
			}
		}

		// Let caller modify knode's status, then push to api server.
		_, condition := kapi.GetNodeCondition(&node.Status, kapi.NodeNetworkUnavailable)
		if condition != nil && condition.Status != kapi.ConditionFalse && condition.Reason == "NoRouteCreated" {
			condition.Status = kapi.ConditionFalse
			condition.Reason = "RouteCreated"
			condition.Message = "openshift-sdn cleared kubelet-set NoRouteCreated"
			condition.LastTransitionTime = kapiunversioned.Now()
			knode, err = master.kClient.Nodes().UpdateStatus(knode)
			if err == nil {
				cleared = true
			}
		}
		return err
	})
	if resultErr != nil {
		utilruntime.HandleError(fmt.Errorf("Status update failed for local node: %v", resultErr))
	} else if cleared {
		log.Infof("Cleared node NetworkUnavailable/NoRouteCreated condition for %s", node.ObjectMeta.Name)
	}
}
Example #29
// ResolveImageID returns the latest TagEvent for the specified imageID. It returns an
// error if more than one image matches the ID or if no image exists.
func ResolveImageID(stream *ImageStream, imageID string) (*TagEvent, error) {
	var event *TagEvent
	set := sets.NewString()
	for _, history := range stream.Status.Tags {
		for i := range history.Items {
			tagging := &history.Items[i]
			if d, err := digest.ParseDigest(tagging.Image); err == nil {
				if strings.HasPrefix(d.Hex(), imageID) || strings.HasPrefix(tagging.Image, imageID) {
					event = tagging
					set.Insert(tagging.Image)
				}
				continue
			}
			if strings.HasPrefix(tagging.Image, imageID) {
				event = tagging
				set.Insert(tagging.Image)
			}
		}
	}
	switch len(set) {
	case 1:
		return &TagEvent{
			Created:              unversioned.Now(),
			DockerImageReference: event.DockerImageReference,
			Image:                event.Image,
		}, nil
	case 0:
		return nil, errors.NewNotFound(Resource("imagestreamimage"), imageID)
	default:
		return nil, errors.NewConflict(Resource("imagestreamimage"), imageID, fmt.Errorf("multiple images match the prefix %q: %s", imageID, strings.Join(set.List(), ", ")))
	}
}
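The resolution rule in Example #29 (also used by Example #2's ImageStreamImage case) is: exactly one prefix match resolves, zero matches is not found, and more than one is ambiguous. A minimal sketch of that rule over plain strings, with the digest parsing and TagEvent construction omitted:

package main

import (
	"fmt"
	"strings"
)

// resolveByPrefix returns the single image ID matching the prefix, an error
// for zero matches, and a different error when the prefix is ambiguous.
func resolveByPrefix(ids []string, prefix string) (string, error) {
	matches := []string{}
	for _, id := range ids {
		if strings.HasPrefix(id, prefix) {
			matches = append(matches, id)
		}
	}
	switch len(matches) {
	case 1:
		return matches[0], nil
	case 0:
		return "", fmt.Errorf("no images match the prefix %q", prefix)
	default:
		return "", fmt.Errorf("multiple images match the prefix %q: %s", prefix, strings.Join(matches, ", "))
	}
}

func main() {
	ids := []string{"sha256:aaa111", "sha256:bbb222"}
	fmt.Println(resolveByPrefix(ids, "sha256:aaa"))
}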
Example #30
func TestNetworkStatusStrategy(t *testing.T) {
	ctx := api.NewDefaultContext()
	if StatusStrategy.AllowCreateOnUpdate() {
		t.Errorf("Networks should not allow create on update")
	}
	now := unversioned.Now()
	oldNetwork := &api.Network{
		ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "10"},
		Spec: api.NetworkSpec{
			TenantID: "12345",
			Subnets:  map[string]api.Subnet{"s1": {CIDR: "192.168.0.0/24", Gateway: "192.168.0.1"}}},
		Status: api.NetworkStatus{Phase: api.NetworkActive},
	}
	network := &api.Network{
		ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "9", DeletionTimestamp: &now},
		Spec: api.NetworkSpec{
			TenantID: "12345",
			Subnets:  map[string]api.Subnet{"s1": {CIDR: "192.168.0.0/24", Gateway: "192.168.0.1"}}},
		Status: api.NetworkStatus{Phase: api.NetworkTerminating},
	}
	StatusStrategy.PrepareForUpdate(network, oldNetwork)
	if network.Status.Phase != api.NetworkTerminating {
		t.Errorf("Network status updates should allow change of phase: %v", network.Status.Phase)
	}
	errs := StatusStrategy.ValidateUpdate(ctx, network, oldNetwork)
	if len(errs) != 0 {
		t.Errorf("Unexpected error %v", errs)
	}
	if network.ResourceVersion != "9" {
		t.Errorf("Incoming resource version on update should not be mutated")
	}
}