// unstructuredToPod converts an Unstructured object into a v1.Pod by encoding
// it to JSON and decoding it with the test codec, clearing the type meta on
// the way out.
func unstructuredToPod(obj *runtime.Unstructured) (*v1.Pod, error) {
	json, err := runtime.Encode(runtime.UnstructuredJSONScheme, obj)
	if err != nil {
		return nil, err
	}
	pod := new(v1.Pod)
	err = runtime.DecodeInto(testapi.Default.Codec(), json, pod)
	pod.Kind = ""
	pod.APIVersion = ""
	return pod, err
}
// getTestPod returns a minimal single-container pod for use in the tests below.
func getTestPod() *v1.Pod {
	container := v1.Container{
		Name: testContainerName,
	}
	pod := v1.Pod{
		Spec: v1.PodSpec{
			Containers:    []v1.Container{container},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
	pod.Name = "testPod"
	pod.UID = testPodUID
	return &pod
}
// ToAPIPod converts Pod to v1.Pod. Note that if a field in v1.Pod has no
// corresponding field in Pod, it will not be populated.
func (p *Pod) ToAPIPod() *v1.Pod {
	var pod v1.Pod
	pod.UID = p.ID
	pod.Name = p.Name
	pod.Namespace = p.Namespace

	for _, c := range p.Containers {
		var container v1.Container
		container.Name = c.Name
		container.Image = c.Image
		pod.Spec.Containers = append(pod.Spec.Containers, container)
	}
	return &pod
}
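// Illustrative usage sketch (not part of the original source): given a runtime
// Pod p, ToAPIPod yields a v1.Pod carrying only the identity fields and the
// container names/images; every other v1.Pod field is left at its zero value.
// The helper name below is hypothetical.
func describeAPIPod(p *Pod) string {
	apiPod := p.ToAPIPod()
	return fmt.Sprintf("%s/%s with %d container(s)", apiPod.Namespace, apiPod.Name, len(apiPod.Spec.Containers))
}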
// SetProfileName sets the name of the profile to use with the container.
func SetProfileName(pod *v1.Pod, containerName, profileName string) error {
	if pod.Annotations == nil {
		pod.Annotations = map[string]string{}
	}
	pod.Annotations[ContainerAnnotationKeyPrefix+containerName] = profileName
	return nil
}
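// Illustrative usage sketch (not part of the original source): the profile is
// stored under a per-container annotation key built from
// ContainerAnnotationKeyPrefix plus the container name, so reading it back is
// a plain map lookup. The helper name below is hypothetical.
func getProfileName(pod *v1.Pod, containerName string) string {
	return pod.Annotations[ContainerAnnotationKeyPrefix+containerName]
}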
// addPodConditionReady marks the pod as running with a Ready condition set to true.
func addPodConditionReady(pod *v1.Pod) {
	pod.Status = v1.PodStatus{
		Phase: v1.PodRunning,
		Conditions: []v1.PodCondition{
			{
				Type:   v1.PodReady,
				Status: v1.ConditionTrue,
			},
		},
	}
}
// updateAnnotations overwrites existing's annotations with ref's annotations,
// preserving the values of any locally managed annotations already set on existing.
func updateAnnotations(existing, ref *v1.Pod) {
	annotations := make(map[string]string, len(ref.Annotations)+len(localAnnotations))
	for k, v := range ref.Annotations {
		annotations[k] = v
	}
	for _, k := range localAnnotations {
		if v, ok := existing.Annotations[k]; ok {
			annotations[k] = v
		}
	}
	existing.Annotations = annotations
}
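// Illustrative sketch (not part of the original source), assuming
// kubetypes.ConfigFirstSeenAnnotationKey is one of the localAnnotations keys:
// ref's annotations win, except that locally managed keys keep whatever value
// existing already had. The function name below is hypothetical.
func exampleUpdateAnnotations() {
	existing := &v1.Pod{}
	existing.Annotations = map[string]string{
		"a": "old",
		kubetypes.ConfigFirstSeenAnnotationKey: "2016-01-01T00:00:00Z",
	}
	ref := &v1.Pod{}
	ref.Annotations = map[string]string{"a": "new", "b": "added"}
	updateAnnotations(existing, ref)
	// existing.Annotations now holds "a"="new", "b"="added", plus the original
	// first-seen value.
}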
// SetInitContainersAnnotations serializes the pod's init containers into the
// corresponding pod annotations.
// TODO: remove this function when init containers become a stable feature
func SetInitContainersAnnotations(pod *v1.Pod) error {
	if len(pod.Spec.InitContainers) > 0 {
		value, err := json.Marshal(pod.Spec.InitContainers)
		if err != nil {
			return err
		}
		if pod.Annotations == nil {
			pod.Annotations = make(map[string]string)
		}
		pod.Annotations[v1.PodInitContainersAnnotationKey] = string(value)
		pod.Annotations[v1.PodInitContainersBetaAnnotationKey] = string(value)
	}
	return nil
}
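// Illustrative usage sketch (not part of the original source): when the pod has
// init containers, both the stable and the beta annotation keys end up holding
// the same JSON-encoded []v1.Container value, so clients reading either key see
// the same data. The helper name below is hypothetical.
func annotateInitContainers(pod *v1.Pod) {
	if err := SetInitContainersAnnotations(pod); err != nil {
		glog.Errorf("failed to serialize init containers for pod %q: %v", pod.Name, err)
		return
	}
	_ = pod.Annotations[v1.PodInitContainersAnnotationKey]
	_ = pod.Annotations[v1.PodInitContainersBetaAnnotationKey]
}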
// checkAndUpdatePod updates existing, and:
// * if ref makes a meaningful change, returns needUpdate=true
// * if ref makes a meaningful change, and this change is graceful deletion, returns needGracefulDelete=true
// * if ref makes no meaningful change, but changes the pod status, returns needReconcile=true
// * else returns all false
// At most one of needUpdate, needGracefulDelete and needReconcile should ever be true.
func checkAndUpdatePod(existing, ref *v1.Pod) (needUpdate, needReconcile, needGracefulDelete bool) {
	// 1. this is a reconcile
	// TODO: it would be better to update the whole object and only preserve certain things
	// like the source annotation or the UID (to ensure safety)
	if !podsDifferSemantically(existing, ref) {
		// this is not an update
		// Only check reconcile when it is not an update, because if the pod is going to
		// be updated, an extra reconcile is unnecessary
		if !reflect.DeepEqual(existing.Status, ref.Status) {
			// Pod with changed pod status needs reconcile, because kubelet should
			// be the source of truth of pod status.
			existing.Status = ref.Status
			needReconcile = true
		}
		return
	}

	// Overwrite the first-seen time with the existing one. This is our own
	// internal annotation, there is no need to update.
	ref.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] = existing.Annotations[kubetypes.ConfigFirstSeenAnnotationKey]

	existing.Spec = ref.Spec
	existing.Labels = ref.Labels
	existing.DeletionTimestamp = ref.DeletionTimestamp
	existing.DeletionGracePeriodSeconds = ref.DeletionGracePeriodSeconds
	existing.Status = ref.Status
	updateAnnotations(existing, ref)

	// 2. this is a graceful delete
	if ref.DeletionTimestamp != nil {
		needGracefulDelete = true
	} else {
		// 3. this is an update
		needUpdate = true
	}
	return
}
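// Illustrative sketch (not part of the original source): a caller typically
// routes the pod into exactly one bucket depending on which flag
// checkAndUpdatePod set. The function and slice names below are hypothetical.
func routePodChange(existing, ref *v1.Pod, updatePods, deletePods, reconcilePods *[]*v1.Pod) {
	needUpdate, needReconcile, needGracefulDelete := checkAndUpdatePod(existing, ref)
	switch {
	case needGracefulDelete:
		*deletePods = append(*deletePods, existing)
	case needUpdate:
		*updatePods = append(*updatePods, existing)
	case needReconcile:
		*reconcilePods = append(*reconcilePods, existing)
	}
}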
// same as above func comments, except 'recyclerClient' is a narrower pod API
// interface to ease testing
func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error {
	glog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name)

	// Generate unique name for the recycler pod - we need to get "already
	// exists" error when a previous controller has already started recycling
	// the volume. Here we assume that pv.Name is already unique.
	pod.Name = "recycler-for-" + pvName
	pod.GenerateName = ""

	stopChannel := make(chan struct{})
	defer close(stopChannel)
	podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel)
	if err != nil {
		glog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err)
		return err
	}

	// Start the pod
	_, err = recyclerClient.CreatePod(pod)
	if err != nil {
		if errors.IsAlreadyExists(err) {
			glog.V(5).Infof("old recycler pod %q found for volume", pod.Name)
		} else {
			return fmt.Errorf("unexpected error creating recycler pod: %+v", err)
		}
	}
	defer func(pod *v1.Pod) {
		glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name)
		if err := recyclerClient.DeletePod(pod.Name, pod.Namespace); err != nil {
			glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err)
		}
	}(pod)

	// Now only one of the old or the new pod is running. Watch it until it
	// finishes and send all events on the pod to the PV.
	for {
		event := <-podCh
		switch event.Object.(type) {
		case *v1.Pod:
			// POD changed
			pod := event.Object.(*v1.Pod)
			glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase)
			switch event.Type {
			case watch.Added, watch.Modified:
				if pod.Status.Phase == v1.PodSucceeded {
					// Recycle succeeded.
					return nil
				}
				if pod.Status.Phase == v1.PodFailed {
					if pod.Status.Message != "" {
						return fmt.Errorf("%s", pod.Status.Message)
					}
					return fmt.Errorf("pod failed, pod.Status.Message unknown")
				}

			case watch.Deleted:
				return fmt.Errorf("recycler pod was deleted")

			case watch.Error:
				return fmt.Errorf("recycler pod watcher failed")
			}

		case *v1.Event:
			// Event received
			podEvent := event.Object.(*v1.Event)
			glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message)
			if event.Type == watch.Added {
				recyclerClient.Event(podEvent.Type, podEvent.Message)
			}
		}
	}
}
// SetIdentity sets the pet namespace and name.
func (n *NameIdentityMapper) SetIdentity(id string, pet *v1.Pod) {
	pet.Name = fmt.Sprintf("%v-%v", n.ps.Name, id)
	pet.Namespace = n.ps.Namespace
}
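// Illustrative sketch (not part of the original source): the pod name is the
// set name joined with the identity by a dash, and the namespace is copied
// from the set. The helper name below is hypothetical.
func exampleSetIdentity(m *NameIdentityMapper, pet *v1.Pod) {
	m.SetIdentity("0", pet)
	// pet.Name is now "<set name>-0" and pet.Namespace matches the set's namespace.
}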
func TestSyncBatchNoDeadlock(t *testing.T) {
	client := &fake.Clientset{}
	m := newTestManager(client)
	pod := getTestPod()

	// Setup fake client.
	var ret v1.Pod
	var err error
	client.AddReactor("*", "pods", func(action core.Action) (bool, runtime.Object, error) {
		switch action := action.(type) {
		case core.GetAction:
			assert.Equal(t, pod.Name, action.GetName(), "Unexpected GetAction: %+v", action)
		case core.UpdateAction:
			assert.Equal(t, pod.Name, action.GetObject().(*v1.Pod).Name, "Unexpected UpdateAction: %+v", action)
		default:
			assert.Fail(t, "Unexpected Action: %+v", action)
		}
		return true, &ret, err
	})

	pod.Status.ContainerStatuses = []v1.ContainerStatus{{State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}}}

	getAction := core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}}
	updateAction := core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}}

	// Pod not found.
	ret = *pod
	err = errors.NewNotFound(api.Resource("pods"), pod.Name)
	m.SetPodStatus(pod, getRandomPodStatus())
	m.testSyncBatch()
	verifyActions(t, client, []core.Action{getAction})
	client.ClearActions()

	// Pod was recreated.
	ret.UID = "other_pod"
	err = nil
	m.SetPodStatus(pod, getRandomPodStatus())
	m.testSyncBatch()
	verifyActions(t, client, []core.Action{getAction})
	client.ClearActions()

	// Pod not deleted (success case).
	ret = *pod
	m.SetPodStatus(pod, getRandomPodStatus())
	m.testSyncBatch()
	verifyActions(t, client, []core.Action{getAction, updateAction})
	client.ClearActions()

	// Pod is terminated, but still running.
	pod.DeletionTimestamp = new(metav1.Time)
	m.SetPodStatus(pod, getRandomPodStatus())
	m.testSyncBatch()
	verifyActions(t, client, []core.Action{getAction, updateAction})
	client.ClearActions()

	// Pod is terminated successfully.
	pod.Status.ContainerStatuses[0].State.Running = nil
	pod.Status.ContainerStatuses[0].State.Terminated = &v1.ContainerStateTerminated{}
	m.SetPodStatus(pod, getRandomPodStatus())
	m.testSyncBatch()
	verifyActions(t, client, []core.Action{getAction, updateAction})
	client.ClearActions()

	// Error case.
	err = fmt.Errorf("intentional test error")
	m.SetPodStatus(pod, getRandomPodStatus())
	m.testSyncBatch()
	verifyActions(t, client, []core.Action{getAction})
	client.ClearActions()
}