func createEvent(eventType eventType, selfUID string, owners []string) event {
	var ownerReferences []api.OwnerReference
	for _, owner := range owners {
		ownerReferences = append(ownerReferences, api.OwnerReference{UID: types.UID(owner)})
	}
	return event{
		eventType: eventType,
		obj: &api.Pod{
			ObjectMeta: api.ObjectMeta{
				UID:             types.UID(selfUID),
				OwnerReferences: ownerReferences,
			},
		},
	}
}
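createEvent depends on an event struct and eventType enum defined elsewhere in the test file; a minimal sketch of plausible declarations (the names addEvent/updateEvent/deleteEvent are assumptions):

// Hypothetical declarations backing createEvent: an enum of watch-event
// kinds and a struct pairing the kind with the affected object.
type eventType int

const (
	addEvent eventType = iota
	updateEvent
	deleteEvent
)

type event struct {
	eventType eventType
	obj       interface{}
}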
Example 2
func TestReasonCache(t *testing.T) {
	// Create test sync result
	syncResult := kubecontainer.PodSyncResult{}
	results := []*kubecontainer.SyncResult{
		// reason cache should be set for SyncResult with StartContainer action and error
		kubecontainer.NewSyncResult(kubecontainer.StartContainer, "container_1"),
		// reason cache should not be set for SyncResult with StartContainer action but without error
		kubecontainer.NewSyncResult(kubecontainer.StartContainer, "container_2"),
		// reason cache should not be set for SyncResult with other actions
		kubecontainer.NewSyncResult(kubecontainer.KillContainer, "container_3"),
	}
	results[0].Fail(kubecontainer.ErrRunContainer, "message_1")
	results[2].Fail(kubecontainer.ErrKillContainer, "message_3")
	syncResult.AddSyncResult(results...)
	uid := types.UID("pod_1")

	reasonCache := NewReasonCache()
	reasonCache.Update(uid, syncResult)
	assertReasonInfo(t, reasonCache, uid, results[0], true)
	assertReasonInfo(t, reasonCache, uid, results[1], false)
	assertReasonInfo(t, reasonCache, uid, results[2], false)

	reasonCache.Remove(uid, results[0].Target.(string))
	assertReasonInfo(t, reasonCache, uid, results[0], false)
}
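The assertReasonInfo helper is not shown above; a minimal sketch of what it could look like, assuming ReasonCache exposes a Get(uid, containerName) method returning the cached reason, message, and a hit flag:

// Hypothetical helper: checks that the cache hit state for the result's
// target container matches `found`, and that a hit carries the recorded
// error and message.
func assertReasonInfo(t *testing.T, cache *ReasonCache, uid types.UID, result *kubecontainer.SyncResult, found bool) {
	name := result.Target.(string)
	reason, message, ok := cache.Get(uid, name)
	if ok != found {
		t.Fatalf("expected cache hit %v for %q, got %v", found, name, ok)
	}
	if ok && (reason != result.Error || message != result.Message) {
		t.Errorf("expected %v %q, got %v %q", result.Error, result.Message, reason, message)
	}
}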
Example 3
func TestMounterAndUnmounterTypeAssert(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
	if err != nil {
		t.Fatalf("Can't find the plugin by name")
	}
	spec := &api.Volume{
		Name: "vol1",
		VolumeSource: api.VolumeSource{
			AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
				VolumeID: "pd",
				FSType:   "ext4",
			},
		},
	}

	mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
	if err != nil {
		t.Fatalf("Failed to make a new Mounter: %v", err)
	}
	if _, ok := mounter.(volume.Unmounter); ok {
		t.Errorf("Volume Mounter can be type-asserted to Unmounter")
	}

	unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
	if err != nil {
		t.Fatalf("Failed to make a new Unmounter: %v", err)
	}
	if _, ok := unmounter.(volume.Mounter); ok {
		t.Errorf("Volume Unmounter can be type-asserted to Mounter")
	}
}
Example 4
func getContainerInfoFromLabel(labels map[string]string) *labelledContainerInfo {
	var err error
	containerInfo := &labelledContainerInfo{
		PodName:      getStringValueFromLabel(labels, types.KubernetesPodNameLabel),
		PodNamespace: getStringValueFromLabel(labels, types.KubernetesPodNamespaceLabel),
		PodUID:       kubetypes.UID(getStringValueFromLabel(labels, types.KubernetesPodUIDLabel)),
		Name:         getStringValueFromLabel(labels, types.KubernetesContainerNameLabel),
		Hash:         getStringValueFromLabel(labels, kubernetesContainerHashLabel),
		TerminationMessagePath: getStringValueFromLabel(labels, kubernetesContainerTerminationMessagePathLabel),
	}
	if containerInfo.RestartCount, err = getIntValueFromLabel(labels, kubernetesContainerRestartCountLabel); err != nil {
		logError(containerInfo, kubernetesContainerRestartCountLabel, err)
	}
	if containerInfo.PodDeletionGracePeriod, err = getInt64PointerFromLabel(labels, kubernetesPodDeletionGracePeriodLabel); err != nil {
		logError(containerInfo, kubernetesPodDeletionGracePeriodLabel, err)
	}
	if containerInfo.PodTerminationGracePeriod, err = getInt64PointerFromLabel(labels, kubernetesPodTerminationGracePeriodLabel); err != nil {
		logError(containerInfo, kubernetesPodTerminationGracePeriodLabel, err)
	}
	preStopHandler := &api.Handler{}
	if found, err := getJsonObjectFromLabel(labels, kubernetesContainerPreStopHandlerLabel, preStopHandler); err != nil {
		logError(containerInfo, kubernetesContainerPreStopHandlerLabel, err)
	} else if found {
		containerInfo.PreStopHandler = preStopHandler
	}
	supplyContainerInfoWithOldLabel(labels, containerInfo)
	return containerInfo
}
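The getStringValueFromLabel and getInt64PointerFromLabel helpers are not shown; a plausible sketch (strconv assumed imported), assuming a missing label is tolerated for backward compatibility with old containers:

// Hypothetical sketches of the label accessors used above.
func getStringValueFromLabel(labels map[string]string, label string) string {
	// Old containers may lack newer labels; return "" and let the caller
	// recover the value some other way.
	return labels[label]
}

func getInt64PointerFromLabel(labels map[string]string, label string) (*int64, error) {
	strValue, found := labels[label]
	if !found {
		// An absent label is not an error; it simply yields a nil pointer.
		return nil, nil
	}
	intValue, err := strconv.ParseInt(strValue, 10, 64)
	if err != nil {
		return nil, err
	}
	return &intValue, nil
}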
Example 5
//  This tests the fast-fail path. Verifying that the precondition is checked
//  again just before the object is deleted is covered by the tests in
//  pkg/storage/etcd.
func (t *Tester) testDeleteWithUID(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc) {
	ctx := t.TestContext()

	foo := copyOrDie(obj)
	t.setObjectMeta(foo, t.namer(1))
	objectMeta := t.getObjectMetaOrFail(foo)
	objectMeta.UID = types.UID("UID0000")
	if err := createFn(ctx, foo); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	obj, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewPreconditionDeleteOptions("UID1111"))
	if err == nil || !errors.IsConflict(err) {
		t.Errorf("unexpected error: %v", err)
	}

	obj, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewPreconditionDeleteOptions("UID0000"))
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if !t.returnDeletedObject {
		if status, ok := obj.(*unversioned.Status); !ok {
			t.Errorf("expected status of delete, got %v", obj)
		} else if status.Status != unversioned.StatusSuccess {
			t.Errorf("expected success, got: %v", status.Status)
		}
	}

	_, err = getFn(ctx, foo)
	if err == nil || !isNotFoundFn(err) {
		t.Errorf("unexpected error: %v", err)
	}
}
Example 6
func TestPluginBackCompat(t *testing.T) {
	basePath, err := utiltesting.MkTmpdir("emptydirTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(basePath)

	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath, "" /* rootContext */)

	spec := &api.Volume{
		Name: "vol1",
	}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Fatalf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Fatalf("Got a nil Mounter")
	}

	volPath := mounter.GetPath()
	if volPath != path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/vol1") {
		t.Errorf("Got unexpected path: %s", volPath)
	}
}
Example 7
// TestDependentsRace relies on golang's data race detector to check for
// data races on the dependents field.
func TestDependentsRace(t *testing.T) {
	clientPool := dynamic.NewClientPool(&restclient.Config{}, dynamic.LegacyAPIPathResolverFunc)
	podResource := []unversioned.GroupVersionResource{{Version: "v1", Resource: "pods"}}
	gc, err := NewGarbageCollector(clientPool, podResource)
	if err != nil {
		t.Fatal(err)
	}

	const updates = 100
	owner := &node{dependentsLock: &sync.RWMutex{}, dependents: make(map[*node]struct{})}
	ownerUID := types.UID("owner")
	gc.propagator.uidToNode.Write(owner)
	go func() {
		for i := 0; i < updates; i++ {
			dependent := &node{}
			gc.propagator.addDependentToOwners(dependent, []metatypes.OwnerReference{{UID: ownerUID}})
			gc.propagator.removeDependentFromOwners(dependent, []metatypes.OwnerReference{{UID: ownerUID}})
		}
	}()
	go func() {
		gc.orphanQueue.Add(owner)
		for i := 0; i < updates; i++ {
			gc.orphanFinalizer()
		}
	}()
}
Example 8
func (t *Tester) testUpdateWithWrongUID(obj runtime.Object, createFn CreateFunc, getFn GetFunc) {
	ctx := t.TestContext()
	foo := copyOrDie(obj)
	t.setObjectMeta(foo, t.namer(5))
	objectMeta := t.getObjectMetaOrFail(foo)
	objectMeta.UID = types.UID("UID0000")
	if err := createFn(ctx, foo); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	objectMeta.UID = types.UID("UID1111")

	obj, created, err := t.storage.(rest.Updater).Update(ctx, objectMeta.Name, rest.DefaultUpdatedObjectInfo(foo, api.Scheme))
	if created || obj != nil {
		t.Errorf("expected nil object and no creation for object: %v", foo)
	}
	if err == nil || !errors.IsConflict(err) {
		t.Errorf("unexpected error: %v", err)
	}
}
Example 9
func (nc *NodeController) recordNodeEvent(nodeName, eventtype, reason, event string) {
	ref := &api.ObjectReference{
		Kind:      "Node",
		Name:      nodeName,
		UID:       types.UID(nodeName),
		Namespace: "",
	}
	glog.V(2).Infof("Recording %s event message for node %s", event, nodeName)
	nc.recorder.Eventf(ref, eventtype, reason, "Node %s event: %s", nodeName, event)
}
Example 10
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("iscsi_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: "pvA",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{
				ISCSI: &api.ISCSIVolumeSource{
					TargetPortal: "127.0.0.1:3260",
					IQN:          "iqn.2014-12.server:storage.target01",
					FSType:       "ext4",
					Lun:          0,
				},
			},
			ClaimRef: &api.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: api.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: api.PersistentVolumeClaimStatus{
			Phase: api.ClaimBound,
		},
	}

	client := fake.NewSimpleClientset(pv, claim)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil, "" /* rootContext */))
	plug, _ := plugMgr.FindPluginByName(iscsiPluginName)

	// The readOnly flag is supplied by the persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

	if !mounter.GetAttributes().ReadOnly {
		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}
Example 11
func (nc *NodeController) recordNodeStatusChange(node *api.Node, newStatus string) {
	ref := &api.ObjectReference{
		Kind:      "Node",
		Name:      node.Name,
		UID:       types.UID(node.Name),
		Namespace: "",
	}
	glog.V(2).Infof("Recording status change %s event message for node %s", newStatus, node.Name)
	// TODO: This requires a transaction: either the node status update and the
	// event recording both happen, or neither does; see issue #6055.
	nc.recorder.Eventf(ref, api.EventTypeNormal, newStatus, "Node %s status is now: %s", node.Name, newStatus)
}
Example 12
func newPetSetWithVolumes(replicas int, name string, petMounts []api.VolumeMount, podMounts []api.VolumeMount) *apps.PetSet {
	mounts := append(petMounts, podMounts...)
	claims := []api.PersistentVolumeClaim{}
	for _, m := range petMounts {
		claims = append(claims, newPVC(m.Name))
	}

	vols := []api.Volume{}
	for _, m := range podMounts {
		vols = append(vols, api.Volume{
			Name: m.Name,
			VolumeSource: api.VolumeSource{
				HostPath: &api.HostPathVolumeSource{
					Path: fmt.Sprintf("/tmp/%v", m.Name),
				},
			},
		})
	}

	return &apps.PetSet{
		TypeMeta: unversioned.TypeMeta{
			Kind:       "PetSet",
			APIVersion: "apps/v1beta1",
		},
		ObjectMeta: api.ObjectMeta{
			Name:      name,
			Namespace: api.NamespaceDefault,
			UID:       types.UID("test"),
		},
		Spec: apps.PetSetSpec{
			Selector: &unversioned.LabelSelector{
				MatchLabels: map[string]string{"foo": "bar"},
			},
			Replicas: replicas,
			Template: api.PodTemplateSpec{
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:         "nginx",
							Image:        "nginx",
							VolumeMounts: mounts,
						},
					},
					Volumes: vols,
				},
			},
			VolumeClaimTemplates: claims,
			ServiceName:          "governingsvc",
		},
	}
}
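newPVC is referenced but not shown; a plausible sketch that returns one claim template per pet mount, requesting a token amount of storage (the access mode and size are assumptions):

// Hypothetical helper: a minimal PersistentVolumeClaim named after the mount.
func newPVC(name string) api.PersistentVolumeClaim {
	return api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{Name: name},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			Resources: api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceStorage: resource.MustParse("1Gi"),
				},
			},
		},
	}
}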
Example 13
func TestGetWork(t *testing.T) {
	q, clock := newTestBasicWorkQueue()
	q.Enqueue(types.UID("foo1"), -1*time.Minute)
	q.Enqueue(types.UID("foo2"), -1*time.Minute)
	q.Enqueue(types.UID("foo3"), 1*time.Minute)
	q.Enqueue(types.UID("foo4"), 1*time.Minute)
	expected := []types.UID{types.UID("foo1"), types.UID("foo2")}
	compareResults(t, expected, q.GetWork())
	compareResults(t, []types.UID{}, q.GetWork())
	// Advance the clock by 1 hour.
	clock.Step(time.Hour)
	expected = []types.UID{types.UID("foo3"), types.UID("foo4")}
	compareResults(t, expected, q.GetWork())
	compareResults(t, []types.UID{}, q.GetWork())
}
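compareResults is not shown; since GetWork makes no ordering guarantee, a reasonable sketch compares the two slices as sets:

// Hypothetical helper: order-insensitive comparison of two UID slices.
func compareResults(t *testing.T, expected, actual []types.UID) {
	if len(expected) != len(actual) {
		t.Errorf("expected %d work items, got %d: %v", len(expected), len(actual), actual)
		return
	}
	want := make(map[types.UID]bool, len(expected))
	for _, uid := range expected {
		want[uid] = true
	}
	for _, uid := range actual {
		if !want[uid] {
			t.Errorf("unexpected work item %q", uid)
		}
	}
}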
Example 14
func TestPatchWithVersionConflictThenAdmissionFailure(t *testing.T) {
	namespace := "bar"
	name := "foo"
	uid := types.UID("uid")
	fifteen := int64(15)
	thirty := int64(30)
	seen := false

	tc := &patchTestCase{
		name: "TestPatchWithVersionConflictThenAdmissionFailure",

		admit: func(updatedObject runtime.Object, currentObject runtime.Object) error {
			if seen {
				return errors.New("admission failure")
			}

			seen = true
			return nil
		},

		startingPod: &api.Pod{},
		changedPod:  &api.Pod{},
		updatePod:   &api.Pod{},

		expectedError: "admission failure",
	}

	tc.startingPod.Name = name
	tc.startingPod.Namespace = namespace
	tc.startingPod.UID = uid
	tc.startingPod.ResourceVersion = "1"
	tc.startingPod.APIVersion = "v1"
	tc.startingPod.Spec.ActiveDeadlineSeconds = &fifteen

	tc.changedPod.Name = name
	tc.changedPod.Namespace = namespace
	tc.changedPod.UID = uid
	tc.changedPod.ResourceVersion = "1"
	tc.changedPod.APIVersion = "v1"
	tc.changedPod.Spec.ActiveDeadlineSeconds = &thirty

	tc.updatePod.Name = name
	tc.updatePod.Namespace = namespace
	tc.updatePod.UID = uid
	tc.updatePod.ResourceVersion = "2"
	tc.updatePod.APIVersion = "v1"
	tc.updatePod.Spec.ActiveDeadlineSeconds = &fifteen
	tc.updatePod.Spec.NodeName = "anywhere"

	tc.Run(t)
}
Example 15
func NewUUID() types.UID {
	uuidLock.Lock()
	defer uuidLock.Unlock()
	result := uuid.NewUUID()
	// The UUID package is naive and can generate identical UUIDs if the
	// time interval is quick enough. Since the UUID uses 100 ns increments,
	// it is short enough to actively wait for a new value.
	for uuid.Equal(lastUUID, result) {
		result = uuid.NewUUID()
	}
	lastUUID = result
	return types.UID(result.String())
}
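NewUUID relies on package-level state not shown here; presumably something like:

var (
	uuidLock sync.Mutex // serializes UUID generation
	lastUUID uuid.UUID  // the previously returned UUID, used to detect repeats
)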
Example 16
func TestJobStrategyWithGeneration(t *testing.T) {
	ctx := api.NewDefaultContext()

	theUID := types.UID("1a2b3c4d5e6f7g8h9i0k")

	validPodTemplateSpec := api.PodTemplateSpec{
		Spec: api.PodSpec{
			RestartPolicy: api.RestartPolicyOnFailure,
			DNSPolicy:     api.DNSClusterFirst,
			Containers:    []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}},
		},
	}
	job := &batch.Job{
		ObjectMeta: api.ObjectMeta{
			Name:      "myjob2",
			Namespace: api.NamespaceDefault,
			UID:       theUID,
		},
		Spec: batch.JobSpec{
			Selector: nil,
			Template: validPodTemplateSpec,
		},
	}

	Strategy.PrepareForCreate(job)
	errs := Strategy.Validate(ctx, job)
	if len(errs) != 0 {
		t.Errorf("Unexpected error validating %v", errs)
	}

	// Verify the fields that PrepareForCreate should have generated.
	if job.Spec.Selector == nil {
		t.Errorf("Selector not generated")
	}
	expectedLabels := make(map[string]string)
	expectedLabels["controller-uid"] = string(theUID)
	if !reflect.DeepEqual(job.Spec.Selector.MatchLabels, expectedLabels) {
		t.Errorf("Expected label selector not generated")
	}
	if job.Spec.Template.ObjectMeta.Labels == nil {
		t.Errorf("Expected template labels not generated")
	}
	if v, ok := job.Spec.Template.ObjectMeta.Labels["job-name"]; !ok || v != "myjob2" {
		t.Errorf("Expected template labels not present")
	}
	if v, ok := job.Spec.Template.ObjectMeta.Labels["controller-uid"]; !ok || v != string(theUID) {
		t.Errorf("Expected template labels not present: ok: %v, v: %v", ok, v)
	}
}
Example 17
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: "pvA",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{
				GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
			},
			ClaimRef: &api.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: api.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: api.PersistentVolumeClaimStatus{
			Phase: api.ClaimBound,
		},
	}

	client := fake.NewSimpleClientset(pv, claim)

	tmpDir, err := utiltesting.MkTmpdir("gcepdTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil, "" /* rootContext */))
	plug, _ := plugMgr.FindPluginByName(gcePersistentDiskPluginName)

	// The readOnly flag is supplied by the persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

	if !mounter.GetAttributes().ReadOnly {
		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}
Example 18
func TestPatchResourceWithVersionConflict(t *testing.T) {
	namespace := "bar"
	name := "foo"
	uid := types.UID("uid")
	fifteen := int64(15)
	thirty := int64(30)

	tc := &patchTestCase{
		name: "TestPatchResourceWithVersionConflict",

		startingPod: &api.Pod{},
		changedPod:  &api.Pod{},
		updatePod:   &api.Pod{},

		expectedPod: &api.Pod{},
	}

	tc.startingPod.Name = name
	tc.startingPod.Namespace = namespace
	tc.startingPod.UID = uid
	tc.startingPod.ResourceVersion = "1"
	tc.startingPod.APIVersion = "v1"
	tc.startingPod.Spec.ActiveDeadlineSeconds = &fifteen

	tc.changedPod.Name = name
	tc.changedPod.Namespace = namespace
	tc.changedPod.UID = uid
	tc.changedPod.ResourceVersion = "1"
	tc.changedPod.APIVersion = "v1"
	tc.changedPod.Spec.ActiveDeadlineSeconds = &thirty

	tc.updatePod.Name = name
	tc.updatePod.Namespace = namespace
	tc.updatePod.UID = uid
	tc.updatePod.ResourceVersion = "2"
	tc.updatePod.APIVersion = "v1"
	tc.updatePod.Spec.ActiveDeadlineSeconds = &fifteen
	tc.updatePod.Spec.NodeName = "anywhere"

	tc.expectedPod.Name = name
	tc.expectedPod.Namespace = namespace
	tc.expectedPod.UID = uid
	tc.expectedPod.ResourceVersion = "2"
	tc.expectedPod.Spec.ActiveDeadlineSeconds = &thirty
	tc.expectedPod.Spec.NodeName = "anywhere"

	tc.Run(t)
}
Example 19
// buildSummaryPods aggregates the container stats in sb.infos by the Pod managing
// each container and returns the resulting PodStats.
// Containers not managed by a Pod are omitted.
func (sb *summaryBuilder) buildSummaryPods() []stats.PodStats {
	// Map each container to a pod and update the PodStats with container data
	podToStats := map[stats.PodReference]*stats.PodStats{}
	for key, cinfo := range sb.infos {
		// On systemd hosts using devicemapper, each mount into the container has an
		// associated cgroup. We ignore them to ensure we do not get duplicate entries
		// in our summary. For details on .mount units: http://man7.org/linux/man-pages/man5/systemd.mount.5.html
		if strings.HasSuffix(key, ".mount") {
			continue
		}
		// Build the Pod key if this container is managed by a Pod
		if !sb.isPodManagedContainer(&cinfo) {
			continue
		}
		ref := sb.buildPodRef(&cinfo)

		// Lookup the PodStats for the pod using the PodRef.  If none exists, initialize a new entry.
		podStats, found := podToStats[ref]
		if !found {
			podStats = &stats.PodStats{PodRef: ref}
			podToStats[ref] = podStats
		}

		// Update the PodStats entry with the stats from the container by adding it to stats.Containers
		containerName := types.GetContainerName(cinfo.Spec.Labels)
		if containerName == leaky.PodInfraContainerName {
			// Special case for infrastructure container which is hidden from the user and has network stats
			podStats.Network = sb.containerInfoV2ToNetworkStats("pod:"+ref.Namespace+"_"+ref.Name, &cinfo)
			podStats.StartTime = unversioned.NewTime(cinfo.Spec.CreationTime)
		} else {
			podStats.Containers = append(podStats.Containers, sb.containerInfoV2ToStats(containerName, &cinfo))
		}
	}

	// Add each PodStats to the result
	result := make([]stats.PodStats, 0, len(podToStats))
	for _, podStats := range podToStats {
		// Lookup the volume stats for each pod
		podUID := kubetypes.UID(podStats.PodRef.UID)
		if vstats, found := sb.fsResourceAnalyzer.GetPodVolumeStats(podUID); found {
			podStats.VolumeStats = vstats.Volumes
		}
		result = append(result, *podStats)
	}
	return result
}
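isPodManagedContainer and buildPodRef are not shown; a plausible sketch, assuming the kubelet's pod name/namespace/UID label accessors:

// Hypothetical sketches: a container is pod-managed iff its cadvisor labels
// carry a pod name and namespace, and those same labels form the PodReference.
func (sb *summaryBuilder) isPodManagedContainer(cinfo *cadvisorapiv2.ContainerInfo) bool {
	return types.GetPodName(cinfo.Spec.Labels) != "" &&
		types.GetPodNamespace(cinfo.Spec.Labels) != ""
}

func (sb *summaryBuilder) buildPodRef(cinfo *cadvisorapiv2.ContainerInfo) stats.PodReference {
	return stats.PodReference{
		Name:      types.GetPodName(cinfo.Spec.Labels),
		Namespace: types.GetPodNamespace(cinfo.Spec.Labels),
		UID:       types.GetPodUID(cinfo.Spec.Labels),
	}
}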
Example 20
// TestMetrics tests that MetricProvider methods return sane values.
func TestMetrics(t *testing.T) {
	// Create an empty temp directory for the volume
	tmpDir, err := utiltesting.MkTmpdir("empty_dir_test")
	if err != nil {
		t.Fatalf("Can't make a tmp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", tmpDir, "" /* rootContext */)

	spec := &api.Volume{
		Name: "vol1",
	}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Fatalf("Failed to make a new Mounter: %v", err)
	}

	// Need to create the subdirectory
	if err := os.MkdirAll(mounter.GetPath(), 0755); err != nil {
		t.Fatalf("Can't create volume subdirectory: %v", err)
	}

	expectedEmptyDirUsage, err := volumetest.FindEmptyDirectoryUsageOnTmpfs()
	if err != nil {
		t.Errorf("Unexpected error finding expected empty directory usage on tmpfs: %v", err)
	}

	// TODO(pwittroc): Move this into a reusable testing utility
	metrics, err := mounter.GetMetrics()
	if err != nil {
		t.Errorf("Unexpected error when calling GetMetrics %v", err)
	}
	if e, a := expectedEmptyDirUsage.Value(), metrics.Used.Value(); e != a {
		t.Errorf("Unexpected value for empty directory; expected %v, got %v", e, a)
	}
	if metrics.Capacity.Value() <= 0 {
		t.Errorf("Expected Capacity to be greater than 0")
	}
	if metrics.Available.Value() <= 0 {
		t.Errorf("Expected Available to be greater than 0")
	}
}
Example 21
func CreateValidPod(name, namespace string) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       types.UID(name), // for the purpose of testing, this is unique enough
			Name:      name,
			Namespace: namespace,
		},
		Spec: api.PodSpec{
			RestartPolicy: api.RestartPolicyAlways,
			DNSPolicy:     api.DNSClusterFirst,
			Containers: []api.Container{
				{
					Name:            "ctr",
					Image:           "image",
					ImagePullPolicy: "IfNotPresent",
					SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
				},
			},
		},
	}
}
Example 22
// Handles kubernetes pod/container stats requests to:
// /stats/<pod name>/<container name>
// /stats/<namespace>/<pod name>/<uid>/<container name>
func (h *handler) handlePodContainer(request *restful.Request, response *restful.Response) {
	query, err := parseStatsRequest(request)
	if err != nil {
		handleError(response, request.Request.URL.String(), err)
		return
	}

	// Default parameters.
	params := map[string]string{
		"namespace": api.NamespaceDefault,
		"uid":       "",
	}
	for k, v := range request.PathParameters() {
		params[k] = v
	}

	if params["podName"] == "" || params["containerName"] == "" {
		response.WriteErrorString(http.StatusBadRequest,
			fmt.Sprintf("Invalid pod container request: %v", params))
		return
	}

	pod, ok := h.provider.GetPodByName(params["namespace"], params["podName"])
	if !ok {
		glog.V(4).Infof("Container not found: %v", params)
		response.WriteError(http.StatusNotFound, kubecontainer.ErrContainerNotFound)
		return
	}
	stats, err := h.provider.GetContainerInfo(
		kubecontainer.GetPodFullName(pod),
		types.UID(params["uid"]),
		params["containerName"],
		query.cadvisorRequest())

	if err != nil {
		handleError(response, fmt.Sprintf("%s %v", request.Request.URL.String(), query), err)
		return
	}
	writeResponse(response, stats)
}
Example 23
func TestHasUID(t *testing.T) {
	testcases := []struct {
		obj    runtime.Object
		hasUID bool
	}{
		{obj: nil, hasUID: false},
		{obj: &api.Pod{}, hasUID: false},
		{obj: runtime.Object(nil), hasUID: false},
		{obj: &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("A")}}, hasUID: true},
	}
	for i, tc := range testcases {
		actual, err := hasUID(tc.obj)
		if err != nil {
			t.Errorf("%d: unexpected error %v", i, err)
			continue
		}
		if tc.hasUID != actual {
			t.Errorf("%d: expected %v, got %v", i, tc.hasUID, actual)
		}
	}
}
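hasUID itself is a small accessor; a minimal sketch, assuming api.ObjectMetaFor extracts the ObjectMeta from a runtime.Object:

// Hypothetical sketch: a nil object or an empty UID both report false.
func hasUID(obj runtime.Object) (bool, error) {
	if obj == nil {
		return false, nil
	}
	objectMeta, err := api.ObjectMetaFor(obj)
	if err != nil {
		return false, err
	}
	return len(objectMeta.UID) != 0, nil
}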
Example 24
func TestPatchResourceWithConflict(t *testing.T) {
	namespace := "bar"
	name := "foo"
	uid := types.UID("uid")

	tc := &patchTestCase{
		name: "TestPatchResourceWithConflict",

		startingPod: &api.Pod{},
		changedPod:  &api.Pod{},
		updatePod:   &api.Pod{},

		expectedError: `Operation cannot be fulfilled on pods "foo": existing 2, new 1`,
	}

	tc.startingPod.Name = name
	tc.startingPod.Namespace = namespace
	tc.startingPod.UID = uid
	tc.startingPod.ResourceVersion = "1"
	tc.startingPod.APIVersion = "v1"
	tc.startingPod.Spec.NodeName = "here"

	tc.changedPod.Name = name
	tc.changedPod.Namespace = namespace
	tc.changedPod.UID = uid
	tc.changedPod.ResourceVersion = "1"
	tc.changedPod.APIVersion = "v1"
	tc.changedPod.Spec.NodeName = "there"

	tc.updatePod.Name = name
	tc.updatePod.Namespace = namespace
	tc.updatePod.UID = uid
	tc.updatePod.ResourceVersion = "2"
	tc.updatePod.APIVersion = "v1"
	tc.updatePod.Spec.NodeName = "anywhere"

	tc.Run(t)
}
Example 25
func createTestPodsStatusesAndEvents(num int) ([]*kubecontainer.Pod, []*kubecontainer.PodStatus, []*PodLifecycleEvent) {
	var pods []*kubecontainer.Pod
	var statuses []*kubecontainer.PodStatus
	var events []*PodLifecycleEvent
	for i := 0; i < num; i++ {
		id := types.UID(fmt.Sprintf("test-pod-%d", i))
		cState := kubecontainer.ContainerStateRunning
		container := createTestContainer(fmt.Sprintf("c%d", i), cState)
		pod := &kubecontainer.Pod{
			ID:         id,
			Containers: []*kubecontainer.Container{container},
		}
		status := &kubecontainer.PodStatus{
			ID:                id,
			ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: cState}},
		}
		event := &PodLifecycleEvent{ID: pod.ID, Type: ContainerStarted, Data: container.ID.ID}
		pods = append(pods, pod)
		statuses = append(statuses, status)
		events = append(events, event)

	}
	return pods, statuses, events
}
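createTestContainer is referenced but not defined here; a minimal sketch, assuming kubecontainer.ContainerID pairs a runtime type with an ID (the "test" type string is an assumption):

// Hypothetical helper: wraps a name and state in a kubecontainer.Container.
func createTestContainer(id string, state kubecontainer.ContainerState) *kubecontainer.Container {
	return &kubecontainer.Container{
		ID:    kubecontainer.ContainerID{Type: "test", ID: id},
		State: state,
	}
}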
Example 26
// doTestPlugin sets up a volume and tears it back down.
func doTestPlugin(t *testing.T, config pluginTestConfig) {
	basePath, err := utiltesting.MkTmpdir("emptydir_volume_test")
	if err != nil {
		t.Fatalf("can't make a temp rootdir: %v", err)
	}
	defer os.RemoveAll(basePath)

	var (
		volumePath  = path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/test-volume")
		metadataDir = path.Join(basePath, "pods/poduid/plugins/kubernetes.io~empty-dir/test-volume")

		plug       = makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath, config.rootContext)
		volumeName = "test-volume"
		spec       = &api.Volume{
			Name:         volumeName,
			VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: config.medium}},
		}

		physicalMounter = mount.FakeMounter{}
		mountDetector   = fakeMountDetector{}
		pod             = &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	)

	// Set up the SELinux options on the pod
	if config.SELinuxOptions != nil {
		pod.Spec = api.PodSpec{
			Containers: []api.Container{
				{
					SecurityContext: &api.SecurityContext{
						SELinuxOptions: config.SELinuxOptions,
					},
					VolumeMounts: []api.VolumeMount{
						{
							Name: volumeName,
						},
					},
				},
			},
		}
	}

	if config.idempotent {
		physicalMounter.MountPoints = []mount.MountPoint{
			{
				Path: volumePath,
			},
		}
		util.SetReady(metadataDir)
	}

	mounter, err := plug.(*emptyDirPlugin).newMounterInternal(volume.NewSpecFromVolume(spec),
		pod,
		&physicalMounter,
		&mountDetector,
		volume.VolumeOptions{})
	if err != nil {
		t.Fatalf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Fatalf("Got a nil Mounter")
	}

	volPath := mounter.GetPath()
	if volPath != volumePath {
		t.Errorf("Got unexpected path: %s", volPath)
	}

	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}

	// Stat the directory and check the permission bits
	fileinfo, err := os.Stat(volPath)
	if !config.idempotent {
		if err != nil {
			if os.IsNotExist(err) {
				t.Errorf("SetUp() failed, volume path not created: %s", volPath)
			} else {
				t.Errorf("SetUp() failed: %v", err)
			}
		}
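		// perm is defined at package level in the full test file (0777 in the original source).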
		if e, a := perm, fileinfo.Mode().Perm(); e != a {
			t.Errorf("Unexpected file mode for %v: expected: %v, got: %v", volPath, e, a)
		}
	} else if err == nil {
		// If this test is for idempotency and we were able
		// to stat the volume path, it's an error.
		t.Errorf("Volume directory was created unexpectedly")
	}

	// Check the number of mounts performed during setup
	if e, a := config.expectedSetupMounts, len(physicalMounter.Log); e != a {
		t.Errorf("Expected %v physicalMounter calls during setup, got %v", e, a)
	} else if config.expectedSetupMounts == 1 &&
		(physicalMounter.Log[0].Action != mount.FakeActionMount || physicalMounter.Log[0].FSType != "tmpfs") {
		t.Errorf("Unexpected physicalMounter action during setup: %#v", physicalMounter.Log[0])
	}
	physicalMounter.ResetLog()

	// Make an unmounter for the volume
	teardownMedium := mediumUnknown
	if config.medium == api.StorageMediumMemory {
		teardownMedium = mediumMemory
	}
	unmounterMountDetector := &fakeMountDetector{medium: teardownMedium, isMount: config.shouldBeMountedBeforeTeardown}
	unmounter, err := plug.(*emptyDirPlugin).newUnmounterInternal(volumeName, types.UID("poduid"), &physicalMounter, unmounterMountDetector)
	if err != nil {
		t.Fatalf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Fatalf("Got a nil Unmounter")
	}

	// Tear down the volume
	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volPath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volPath)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}

	// Check the number of physicalMounter calls during teardown
	if e, a := config.expectedTeardownMounts, len(physicalMounter.Log); e != a {
		t.Errorf("Expected %v physicalMounter calls during teardown, got %v", e, a)
	} else if config.expectedTeardownMounts == 1 && physicalMounter.Log[0].Action != mount.FakeActionUnmount {
		t.Errorf("Unexpected physicalMounter action during teardown: %#v", physicalMounter.Log[0])
	}
	physicalMounter.ResetLog()
}
Example 27
func (u *Unstructured) GetUID() types.UID {
	return types.UID(getNestedString(u.Object, "metadata", "uid"))
}
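getNestedString is a small helper over the unstructured object map; a plausible sketch:

// Hypothetical sketch: walks the nested map along the field path and returns
// the value as a string, or "" if the path is absent or not a string.
func getNestedString(obj map[string]interface{}, fields ...string) string {
	var val interface{} = obj
	for _, field := range fields {
		m, ok := val.(map[string]interface{})
		if !ok {
			return ""
		}
		val = m[field]
	}
	s, _ := val.(string)
	return s
}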
Example 28
// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set.
func NewPreconditionDeleteOptions(uid string) *DeleteOptions {
	u := types.UID(uid)
	p := Preconditions{UID: &u}
	return &DeleteOptions{Preconditions: &p}
}
Example 29
// NewUIDPreconditions returns a Preconditions with UID set.
func NewUIDPreconditions(uid string) *Preconditions {
	u := types.UID(uid)
	return &Preconditions{UID: &u}
}
Example 30
// FuzzerFor can randomly populate api objects that are destined for version.
func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source) *fuzz.Fuzzer {
	f := fuzz.New().NilChance(.5).NumElements(1, 1)
	if src != nil {
		f.RandSource(src)
	}
	f.Funcs(
		func(j *int, c fuzz.Continue) {
			*j = int(c.Int31())
		},
		func(j **int, c fuzz.Continue) {
			if c.RandBool() {
				i := int(c.Int31())
				*j = &i
			} else {
				*j = nil
			}
		},
		func(q *resource.Quantity, c fuzz.Continue) {
			*q = *resource.NewQuantity(c.Int63n(1000), resource.DecimalExponent)
		},
		func(j *runtime.TypeMeta, c fuzz.Continue) {
			// We have to customize the randomization of TypeMetas because their
			// APIVersion and Kind must remain blank in memory.
			j.APIVersion = ""
			j.Kind = ""
		},
		func(j *unversioned.TypeMeta, c fuzz.Continue) {
			// We have to customize the randomization of TypeMetas because their
			// APIVersion and Kind must remain blank in memory.
			j.APIVersion = ""
			j.Kind = ""
		},
		func(j *api.ObjectMeta, c fuzz.Continue) {
			j.Name = c.RandString()
			j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
			j.SelfLink = c.RandString()
			j.UID = types.UID(c.RandString())
			j.GenerateName = c.RandString()

			var sec, nsec int64
			c.Fuzz(&sec)
			c.Fuzz(&nsec)
			j.CreationTimestamp = unversioned.Unix(sec, nsec).Rfc3339Copy()
		},
		func(j *api.ObjectReference, c fuzz.Continue) {
			// Unlike TypeMeta, the APIVersion and Kind of an ObjectReference
			// carry real data, so randomize them along with the other fields.
			j.APIVersion = c.RandString()
			j.Kind = c.RandString()
			j.Namespace = c.RandString()
			j.Name = c.RandString()
			j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
			j.FieldPath = c.RandString()
		},
		func(j *unversioned.ListMeta, c fuzz.Continue) {
			j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
			j.SelfLink = c.RandString()
		},
		func(j *api.ListOptions, c fuzz.Continue) {
			label, _ := labels.Parse("a=b")
			j.LabelSelector = label
			field, _ := fields.ParseSelector("a=b")
			j.FieldSelector = field
		},
		func(j *api.PodExecOptions, c fuzz.Continue) {
			j.Stdout = true
			j.Stderr = true
		},
		func(j *api.PodAttachOptions, c fuzz.Continue) {
			j.Stdout = true
			j.Stderr = true
		},
		func(s *api.PodSpec, c fuzz.Continue) {
			c.FuzzNoCustom(s)
			// has a default value
			ttl := int64(30)
			if c.RandBool() {
				ttl = int64(c.Uint32())
			}
			s.TerminationGracePeriodSeconds = &ttl

			c.Fuzz(s.SecurityContext)

			if s.SecurityContext == nil {
				s.SecurityContext = new(api.PodSecurityContext)
			}
		},
		func(j *api.PodPhase, c fuzz.Continue) {
			statuses := []api.PodPhase{api.PodPending, api.PodRunning, api.PodFailed, api.PodUnknown}
			*j = statuses[c.Rand.Intn(len(statuses))]
		},
		func(j *api.Binding, c fuzz.Continue) {
			c.Fuzz(&j.ObjectMeta)
			j.Target.Name = c.RandString()
		},
		func(j *api.ReplicationControllerSpec, c fuzz.Continue) {
			c.FuzzNoCustom(j) // fuzz self without calling this function again
			//j.TemplateRef = nil // this is required for round trip
		},
		func(j *extensions.DeploymentStrategy, c fuzz.Continue) {
			c.FuzzNoCustom(j) // fuzz self without calling this function again
			// Ensure that strategyType is one of valid values.
			strategyTypes := []extensions.DeploymentStrategyType{extensions.RecreateDeploymentStrategyType, extensions.RollingUpdateDeploymentStrategyType}
			j.Type = strategyTypes[c.Rand.Intn(len(strategyTypes))]
			if j.Type != extensions.RollingUpdateDeploymentStrategyType {
				j.RollingUpdate = nil
			} else {
				rollingUpdate := extensions.RollingUpdateDeployment{}
				if c.RandBool() {
					rollingUpdate.MaxUnavailable = intstr.FromInt(int(c.RandUint64()))
					rollingUpdate.MaxSurge = intstr.FromInt(int(c.RandUint64()))
				} else {
					rollingUpdate.MaxSurge = intstr.FromString(fmt.Sprintf("%d%%", c.RandUint64()))
				}
				j.RollingUpdate = &rollingUpdate
			}
		},
		func(j *batch.JobSpec, c fuzz.Continue) {
			c.FuzzNoCustom(j) // fuzz self without calling this function again
			completions := int32(c.Rand.Int31())
			parallelism := int32(c.Rand.Int31())
			j.Completions = &completions
			j.Parallelism = &parallelism
			if c.Rand.Int31()%2 == 0 {
				j.ManualSelector = newBool(true)
			} else {
				j.ManualSelector = nil
			}
		},
		func(cp *batch.ConcurrencyPolicy, c fuzz.Continue) {
			policies := []batch.ConcurrencyPolicy{batch.AllowConcurrent, batch.ForbidConcurrent, batch.ReplaceConcurrent}
			*cp = policies[c.Rand.Intn(len(policies))]
		},
		func(j *api.List, c fuzz.Continue) {
			c.FuzzNoCustom(j) // fuzz self without calling this function again
			// TODO: uncomment when round trip starts from a versioned object
			if false { //j.Items == nil {
				j.Items = []runtime.Object{}
			}
		},
		func(j *runtime.Object, c fuzz.Continue) {
			// TODO: uncomment when round trip starts from a versioned object
			if true { //c.RandBool() {
				*j = &runtime.Unknown{
					// We do not set TypeMeta here because it is not carried through a round trip
					Raw:         []byte(`{"apiVersion":"unknown.group/unknown","kind":"Something","someKey":"someValue"}`),
					ContentType: runtime.ContentTypeJSON,
				}
			} else {
				types := []runtime.Object{&api.Pod{}, &api.ReplicationController{}}
				t := types[c.Rand.Intn(len(types))]
				c.Fuzz(t)
				*j = t
			}
		},
		func(q *api.ResourceRequirements, c fuzz.Continue) {
			randomQuantity := func() resource.Quantity {
				var q resource.Quantity
				c.Fuzz(&q)
				// precalc the string for benchmarking purposes
				_ = q.String()
				return q
			}
			q.Limits = make(api.ResourceList)
			q.Requests = make(api.ResourceList)
			cpuLimit := randomQuantity()
			q.Limits[api.ResourceCPU] = *cpuLimit.Copy()
			q.Requests[api.ResourceCPU] = *cpuLimit.Copy()
			memoryLimit := randomQuantity()
			q.Limits[api.ResourceMemory] = *memoryLimit.Copy()
			q.Requests[api.ResourceMemory] = *memoryLimit.Copy()
			storageLimit := randomQuantity()
			q.Limits[api.ResourceStorage] = *storageLimit.Copy()
			q.Requests[api.ResourceStorage] = *storageLimit.Copy()
		},
		func(q *api.LimitRangeItem, c fuzz.Continue) {
			var cpuLimit resource.Quantity
			c.Fuzz(&cpuLimit)

			q.Type = api.LimitTypeContainer
			q.Default = make(api.ResourceList)
			q.Default[api.ResourceCPU] = *(cpuLimit.Copy())

			q.DefaultRequest = make(api.ResourceList)
			q.DefaultRequest[api.ResourceCPU] = *(cpuLimit.Copy())

			q.Max = make(api.ResourceList)
			q.Max[api.ResourceCPU] = *(cpuLimit.Copy())

			q.Min = make(api.ResourceList)
			q.Min[api.ResourceCPU] = *(cpuLimit.Copy())

			q.MaxLimitRequestRatio = make(api.ResourceList)
			q.MaxLimitRequestRatio[api.ResourceCPU] = resource.MustParse("10")
		},
		func(p *api.PullPolicy, c fuzz.Continue) {
			policies := []api.PullPolicy{api.PullAlways, api.PullNever, api.PullIfNotPresent}
			*p = policies[c.Rand.Intn(len(policies))]
		},
		func(rp *api.RestartPolicy, c fuzz.Continue) {
			policies := []api.RestartPolicy{api.RestartPolicyAlways, api.RestartPolicyNever, api.RestartPolicyOnFailure}
			*rp = policies[c.Rand.Intn(len(policies))]
		},
		// Only api.DownwardAPIVolumeFile needs to have a specific func since FieldRef has to be
		// defaulted to a version otherwise roundtrip will fail
		// For the remaining volume plugins the default fuzzer is enough.
		func(m *api.DownwardAPIVolumeFile, c fuzz.Continue) {
			m.Path = c.RandString()
			versions := []string{"v1"}
			m.FieldRef = &api.ObjectFieldSelector{}
			m.FieldRef.APIVersion = versions[c.Rand.Intn(len(versions))]
			m.FieldRef.FieldPath = c.RandString()
		},
		func(vs *api.VolumeSource, c fuzz.Continue) {
			// Exactly one of the fields must be set.
			v := reflect.ValueOf(vs).Elem()
			i := int(c.RandUint64() % uint64(v.NumField()))
			t := v.Field(i).Addr()
			for v.Field(i).IsNil() {
				c.Fuzz(t.Interface())
			}
		},
		func(i *api.ISCSIVolumeSource, c fuzz.Continue) {
			i.ISCSIInterface = c.RandString()
			if i.ISCSIInterface == "" {
				i.ISCSIInterface = "default"
			}
		},
		func(d *api.DNSPolicy, c fuzz.Continue) {
			policies := []api.DNSPolicy{api.DNSClusterFirst, api.DNSDefault}
			*d = policies[c.Rand.Intn(len(policies))]
		},
		func(p *api.Protocol, c fuzz.Continue) {
			protocols := []api.Protocol{api.ProtocolTCP, api.ProtocolUDP}
			*p = protocols[c.Rand.Intn(len(protocols))]
		},
		func(p *api.ServiceAffinity, c fuzz.Continue) {
			types := []api.ServiceAffinity{api.ServiceAffinityClientIP, api.ServiceAffinityNone}
			*p = types[c.Rand.Intn(len(types))]
		},
		func(p *api.ServiceType, c fuzz.Continue) {
			types := []api.ServiceType{api.ServiceTypeClusterIP, api.ServiceTypeNodePort, api.ServiceTypeLoadBalancer}
			*p = types[c.Rand.Intn(len(types))]
		},
		func(ct *api.Container, c fuzz.Continue) {
			c.FuzzNoCustom(ct)                                          // fuzz self without calling this function again
			ct.TerminationMessagePath = "/" + ct.TerminationMessagePath // Must be non-empty
		},
		func(p *api.Probe, c fuzz.Continue) {
			c.FuzzNoCustom(p)
			// These fields have default values.
			intFieldsWithDefaults := [...]string{"TimeoutSeconds", "PeriodSeconds", "SuccessThreshold", "FailureThreshold"}
			v := reflect.ValueOf(p).Elem()
			for _, field := range intFieldsWithDefaults {
				f := v.FieldByName(field)
				if f.Int() == 0 {
					f.SetInt(1)
				}
			}
		},
		func(ev *api.EnvVar, c fuzz.Continue) {
			ev.Name = c.RandString()
			if c.RandBool() {
				ev.Value = c.RandString()
			} else {
				ev.ValueFrom = &api.EnvVarSource{}
				ev.ValueFrom.FieldRef = &api.ObjectFieldSelector{}

				var versions []unversioned.GroupVersion
				for _, testGroup := range testapi.Groups {
					versions = append(versions, *testGroup.GroupVersion())
				}

				ev.ValueFrom.FieldRef.APIVersion = versions[c.Rand.Intn(len(versions))].String()
				ev.ValueFrom.FieldRef.FieldPath = c.RandString()
			}
		},
		func(sc *api.SecurityContext, c fuzz.Continue) {
			c.FuzzNoCustom(sc) // fuzz self without calling this function again
			if c.RandBool() {
				priv := c.RandBool()
				sc.Privileged = &priv
			}

			if c.RandBool() {
				sc.Capabilities = &api.Capabilities{
					Add:  make([]api.Capability, 0),
					Drop: make([]api.Capability, 0),
				}
				c.Fuzz(&sc.Capabilities.Add)
				c.Fuzz(&sc.Capabilities.Drop)
			}
		},
		func(s *api.Secret, c fuzz.Continue) {
			c.FuzzNoCustom(s) // fuzz self without calling this function again
			s.Type = api.SecretTypeOpaque
		},
		func(r *api.RBDVolumeSource, c fuzz.Continue) {
			r.RBDPool = c.RandString()
			if r.RBDPool == "" {
				r.RBDPool = "rbd"
			}
			r.RadosUser = c.RandString()
			if r.RadosUser == "" {
				r.RadosUser = "******"
			}
			r.Keyring = c.RandString()
			if r.Keyring == "" {
				r.Keyring = "/etc/ceph/keyring"
			}
		},
		func(pv *api.PersistentVolume, c fuzz.Continue) {
			c.FuzzNoCustom(pv) // fuzz self without calling this function again
			types := []api.PersistentVolumePhase{api.VolumeAvailable, api.VolumePending, api.VolumeBound, api.VolumeReleased, api.VolumeFailed}
			pv.Status.Phase = types[c.Rand.Intn(len(types))]
			pv.Status.Message = c.RandString()
			reclamationPolicies := []api.PersistentVolumeReclaimPolicy{api.PersistentVolumeReclaimRecycle, api.PersistentVolumeReclaimRetain}
			pv.Spec.PersistentVolumeReclaimPolicy = reclamationPolicies[c.Rand.Intn(len(reclamationPolicies))]
		},
		func(pvc *api.PersistentVolumeClaim, c fuzz.Continue) {
			c.FuzzNoCustom(pvc) // fuzz self without calling this function again
			types := []api.PersistentVolumeClaimPhase{api.ClaimBound, api.ClaimPending, api.ClaimLost}
			pvc.Status.Phase = types[c.Rand.Intn(len(types))]
		},
		func(s *api.NamespaceSpec, c fuzz.Continue) {
			s.Finalizers = []api.FinalizerName{api.FinalizerKubernetes}
		},
		func(s *api.NamespaceStatus, c fuzz.Continue) {
			s.Phase = api.NamespaceActive
		},
		func(http *api.HTTPGetAction, c fuzz.Continue) {
			c.FuzzNoCustom(http)            // fuzz self without calling this function again
			http.Path = "/" + http.Path     // can't be blank
			http.Scheme = "x" + http.Scheme // can't be blank
		},
		func(ss *api.ServiceSpec, c fuzz.Continue) {
			c.FuzzNoCustom(ss) // fuzz self without calling this function again
			if len(ss.Ports) == 0 {
				// There must be at least 1 port.
				ss.Ports = append(ss.Ports, api.ServicePort{})
				c.Fuzz(&ss.Ports[0])
			}
			for i := range ss.Ports {
				switch ss.Ports[i].TargetPort.Type {
				case intstr.Int:
					ss.Ports[i].TargetPort.IntVal = 1 + ss.Ports[i].TargetPort.IntVal%65535 // non-zero
				case intstr.String:
					ss.Ports[i].TargetPort.StrVal = "x" + ss.Ports[i].TargetPort.StrVal // non-empty
				}
			}
		},
		func(n *api.Node, c fuzz.Continue) {
			c.FuzzNoCustom(n)
			n.Spec.ExternalID = "external"
		},
		func(s *api.NodeStatus, c fuzz.Continue) {
			c.FuzzNoCustom(s)
			s.Allocatable = s.Capacity
		},
		func(s *autoscaling.HorizontalPodAutoscalerSpec, c fuzz.Continue) {
			c.FuzzNoCustom(s) // fuzz self without calling this function again
			minReplicas := int32(c.Rand.Int31())
			s.MinReplicas = &minReplicas
			targetCpu := int32(c.RandUint64())
			s.TargetCPUUtilizationPercentage = &targetCpu
		},
		func(psp *extensions.PodSecurityPolicySpec, c fuzz.Continue) {
			c.FuzzNoCustom(psp) // fuzz self without calling this function again
			runAsUserRules := []extensions.RunAsUserStrategy{extensions.RunAsUserStrategyMustRunAsNonRoot, extensions.RunAsUserStrategyMustRunAs, extensions.RunAsUserStrategyRunAsAny}
			psp.RunAsUser.Rule = runAsUserRules[c.Rand.Intn(len(runAsUserRules))]
			seLinuxRules := []extensions.SELinuxStrategy{extensions.SELinuxStrategyRunAsAny, extensions.SELinuxStrategyMustRunAs}
			psp.SELinux.Rule = seLinuxRules[c.Rand.Intn(len(seLinuxRules))]
		},
		func(s *extensions.Scale, c fuzz.Continue) {
			c.FuzzNoCustom(s) // fuzz self without calling this function again
			// TODO: Implement a fuzzer to generate valid keys, values and operators for
			// selector requirements.
			if s.Status.Selector != nil {
				s.Status.Selector = &unversioned.LabelSelector{
					MatchLabels: map[string]string{
						"testlabelkey": "testlabelval",
					},
					MatchExpressions: []unversioned.LabelSelectorRequirement{
						{
							Key:      "testkey",
							Operator: unversioned.LabelSelectorOpIn,
							Values:   []string{"val1", "val2", "val3"},
						},
					},
				}
			}
		},
		func(r *runtime.RawExtension, c fuzz.Continue) {
			// Pick an arbitrary type and fuzz it
			types := []runtime.Object{&api.Pod{}, &extensions.Deployment{}, &api.Service{}}
			obj := types[c.Rand.Intn(len(types))]
			c.Fuzz(obj)

			// Find a codec for converting the object to raw bytes.  This is necessary for the
			// api version and kind to be correctly set by serialization.
			var codec runtime.Codec
			switch obj.(type) {
			case *api.Pod:
				codec = testapi.Default.Codec()
			case *extensions.Deployment:
				codec = testapi.Extensions.Codec()
			case *api.Service:
				codec = testapi.Default.Codec()
			default:
				t.Errorf("Failed to find codec for object type: %T", obj)
				return
			}

			// Convert the object to raw bytes
			bytes, err := runtime.Encode(codec, obj)
			if err != nil {
				t.Errorf("Failed to encode object: %v", err)
				return
			}

			// Set the bytes field on the RawExtension
			r.Raw = bytes
		},
	)
	return f
}
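The batch.JobSpec fuzz func above references a newBool helper that is not shown; a typical definition:

// newBool returns a pointer to a fresh bool with the given value.
func newBool(b bool) *bool {
	return &b
}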