func TestForgetNonExistingPodWorkers(t *testing.T) {
	podWorkers, _ := createPodWorkers()

	numPods := 20
	for i := 0; i < numPods; i++ {
		// Use a decimal string for the UID; string(i) would yield a
		// single-rune string, not the number's text.
		podWorkers.UpdatePod(newPod(strconv.Itoa(i), "name"), nil, func() {})
	}
	drainWorkers(podWorkers, numPods)

	if len(podWorkers.podUpdates) != numPods {
		t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates))
	}

	desiredPods := map[types.UID]empty{}
	desiredPods[types.UID("2")] = empty{}
	desiredPods[types.UID("14")] = empty{}
	podWorkers.ForgetNonExistingPodWorkers(desiredPods)
	if len(podWorkers.podUpdates) != 2 {
		t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates))
	}
	if _, exists := podWorkers.podUpdates[types.UID("2")]; !exists {
		t.Errorf("No updates channel for pod 2")
	}
	if _, exists := podWorkers.podUpdates[types.UID("14")]; !exists {
		t.Errorf("No updates channel for pod 14")
	}

	podWorkers.ForgetNonExistingPodWorkers(map[types.UID]empty{})
	if len(podWorkers.podUpdates) != 0 {
		t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates))
	}
}
func TestPlugin(t *testing.T) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), newTestHost(t))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/git-repo")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &api.Volume{
		Name: "vol1",
		VolumeSource: api.VolumeSource{
			GitRepo: &api.GitRepoVolumeSource{
				Repository: "https://github.com/GoogleCloudPlatform/kubernetes.git",
				Revision:   "2a30ce65c5ab586b98916d83385c5983edd353a1",
			},
		},
	}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	builder, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{RootContext: ""})
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder")
	}

	path := builder.GetPath()
	if !strings.HasSuffix(path, "pods/poduid/volumes/kubernetes.io~git-repo/vol1") {
		t.Errorf("Got unexpected path: %s", path)
	}

	testSetUp(plug, builder, t)
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"))
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		t.Errorf("Got a nil Cleaner")
	}
	if err := cleaner.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
}
func doTestPlugin(t *testing.T, spec *volume.Spec) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	builder, err := plug.(*rbdPlugin).newBuilderInternal(spec, types.UID("poduid"), &fakeDiskManager{}, &mount.FakeMounter{}, "secrets")
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Error("Got a nil Builder")
	}

	path := builder.GetPath()
	if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~rbd/vol1" {
		t.Errorf("Got unexpected path: %s", path)
	}

	if err := builder.SetUp(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	cleaner, err := plug.(*rbdPlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakeDiskManager{}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		t.Error("Got a nil Cleaner")
	}
	if err := cleaner.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
}
func TestName(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid")
		testVolumeName = "test_name"
		testNamespace  = "test_metadata_namespace"
		testName       = "test_metadata_name"
	)

	volumeSpec := &api.Volume{
		Name: testVolumeName,
		VolumeSource: api.VolumeSource{
			DownwardAPI: &api.DownwardAPIVolumeSource{
				Items: []api.DownwardAPIVolumeFile{
					{Path: "name_file_name", FieldRef: api.ObjectFieldSelector{
						FieldPath: "metadata.name"}}}},
		},
	}

	fake := testclient.NewSimpleFake(&api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      testName,
			Namespace: testNamespace,
		},
	})

	pluginMgr := volume.VolumePluginMgr{}
	pluginMgr.InitPlugins(ProbeVolumePlugins(), newTestHost(t, fake))
	plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}

	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Name: testName}}
	builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder")
	}

	volumePath := builder.GetPath()
	err = builder.SetUp()
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}

	var data []byte
	data, err = ioutil.ReadFile(path.Join(volumePath, "name_file_name"))
	if err != nil {
		t.Errorf("%v", err)
	}
	if string(data) != testName {
		t.Errorf("Found `%s` expected %s", string(data), testName)
	}
	CleanEverything(plugin, testVolumeName, volumePath, testPodUID, t)
}
// ParseDockerName unpacks a docker container name, returning the pod full
// name, pod UID, and container name we would have used to construct the
// docker name, plus the container's spec hash. If we are unable to parse the
// name, an error is returned.
func ParseDockerName(name string) (dockerName *KubeletContainerName, hash uint64, err error) {
	// For some reason docker appears to be appending '/' to names.
	// If it's there, strip it.
	name = strings.TrimPrefix(name, "/")
	parts := strings.Split(name, "_")
	if len(parts) == 0 || parts[0] != containerNamePrefix {
		err = fmt.Errorf("failed to parse Docker container name %q into parts", name)
		return nil, 0, err
	}
	if len(parts) < 6 {
		// We expect at least six fields. We may have more in the future.
		// Anything with fewer fields than this is not something we can
		// manage.
		glog.Warningf("found a container with the %q prefix, but too few fields (%d): %q", containerNamePrefix, len(parts), name)
		err = fmt.Errorf("Docker container name %q has fewer parts than expected %v", name, parts)
		return nil, 0, err
	}

	nameParts := strings.Split(parts[1], ".")
	containerName := nameParts[0]
	if len(nameParts) > 1 {
		hash, err = strconv.ParseUint(nameParts[1], 16, 32)
		if err != nil {
			glog.Warningf("invalid container hash %q in container %q", nameParts[1], name)
		}
	}

	podFullName := parts[2] + "_" + parts[3]
	podUID := types.UID(parts[4])

	return &KubeletContainerName{podFullName, podUID, containerName}, hash, nil
}
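// Illustrative round-trip, not from the original source: the parser above
// assumes docker names of the form
// "<prefix>_<container>.<hash>_<podname>_<namespace>_<poduid>_<attempt>",
// with containerNamePrefix conventionally "k8s". The sample values below are
// made up.
//
//	name := "k8s_mycontainer.beefbeef_mypod_myns_40ae2f8a-f9e2-11e4-b9b2-1697f925ec7b_0"
//	parsed, hash, err := ParseDockerName(name)
//	// err == nil
//	// parsed.PodFullName == "mypod_myns"
//	// parsed.PodUID == types.UID("40ae2f8a-f9e2-11e4-b9b2-1697f925ec7b")
//	// parsed.ContainerName == "mycontainer"
//	// hash == 0xbeefbeef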
func TestBuilderAndCleanerTypeAssert(t *testing.T) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &api.Volume{
		Name: "vol1",
		VolumeSource: api.VolumeSource{
			AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
				VolumeID: "pd",
				FSType:   "ext4",
			},
		},
	}

	builder, err := plug.(*awsElasticBlockStorePlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if _, ok := builder.(volume.Cleaner); ok {
		t.Errorf("Volume Builder can be type-asserted to Cleaner")
	}

	cleaner, err := plug.(*awsElasticBlockStorePlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if _, ok := cleaner.(volume.Builder); ok {
		t.Errorf("Volume Cleaner can be type-asserted to Builder")
	}
}
func TestUpdatePod(t *testing.T) {
	podWorkers, processed := createPodWorkers()

	// Check whether all pod updates will be processed.
	numPods := 20
	for i := 0; i < numPods; i++ {
		for j := i; j < numPods; j++ {
			// Use decimal strings for the UID and name; string(j) would
			// yield a single-rune string, not the number's text.
			podWorkers.UpdatePod(newPod(strconv.Itoa(j), strconv.Itoa(i)), nil, func() {})
		}
	}
	drainWorkers(podWorkers, numPods)

	if len(processed) != numPods {
		t.Errorf("Not all pods processed: %v", len(processed))
		return
	}
	for i := 0; i < numPods; i++ {
		uid := types.UID(strconv.Itoa(i))
		if len(processed[uid]) < 1 || len(processed[uid]) > i+1 {
			t.Errorf("Pod %v processed %v times", i, len(processed[uid]))
			continue
		}

		first := 0
		last := len(processed[uid]) - 1
		if processed[uid][first] != "0" {
			t.Errorf("Pod %v: incorrect order %v, %v", i, first, processed[uid][first])
		}
		if processed[uid][last] != strconv.Itoa(i) {
			t.Errorf("Pod %v: incorrect order %v, %v", i, last, processed[uid][last])
		}
	}
}
func newPod(uid, name string) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:  types.UID(uid),
			Name: name,
		},
	}
}
func getPodCoordinates(request *restful.Request) (namespace, pod string, uid types.UID) {
	namespace = request.PathParameter("podNamespace")
	pod = request.PathParameter("podID")
	if uidStr := request.PathParameter("uid"); uidStr != "" {
		uid = types.UID(uidStr)
	}
	return
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: "pvA",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{
				Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
			},
			ClaimRef: &api.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: api.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: api.PersistentVolumeClaimStatus{
			Phase: api.ClaimBound,
		},
	}

	ep := &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
			Name: "ep",
		},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "foo", Port: 80, Protocol: api.ProtocolTCP}},
		}},
	}

	o := testclient.NewObjects(api.Scheme, api.Scheme)
	o.Add(pv)
	o.Add(claim)
	o.Add(ep)

	client := &testclient.Fake{}
	client.AddReactor("*", "*", testclient.ObjectReaction(o, testapi.Default.RESTMapper()))

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
	plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)

	// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})

	if !builder.IsReadOnly() {
		t.Errorf("Expected true for builder.IsReadOnly")
	}
}
// NewUUID generates a new UUID. The underlying UUID package is naive and can
// generate identical UUIDs if called within a short enough interval, since it
// uses 100ns time increments. To be safe, hold the lock for 200ns after each
// call before the next UUID can be generated. The sleep happens in a
// goroutine so the caller doesn't have to wait.
// TODO: save old unused UUIDs so that no one has to block.
func NewUUID() types.UID {
	uuidLock.Lock()
	result := uuid.NewUUID()
	go func() {
		time.Sleep(200 * time.Nanosecond)
		uuidLock.Unlock()
	}()
	return types.UID(result.String())
}
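// Minimal usage sketch (not from the original source): because NewUUID holds
// uuidLock for 200ns after generating each value, back-to-back calls are
// expected to return distinct UIDs even on a coarse clock.
//
//	a := NewUUID()
//	b := NewUUID()
//	if a == b {
//		panic("NewUUID returned duplicate UIDs") // the lock above should make this unreachable
//	}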
func TestPlugin(t *testing.T) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &api.Volume{
		Name: "vol1",
		VolumeSource: api.VolumeSource{
			CephFS: &api.CephFSVolumeSource{
				Monitors:   []string{"a", "b"},
				User:       "******",
				SecretRef:  nil,
				SecretFile: "/etc/ceph/user.secret",
			},
		},
	}

	builder, err := plug.(*cephfsPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}, "secrets")
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder")
	}

	volumePath := builder.GetPath()
	if volumePath != "/tmp/fake/pods/poduid/volumes/kubernetes.io~cephfs/vol1" {
		t.Errorf("Got unexpected path: %s", volumePath)
	}
	if err := builder.SetUp(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	cleaner, err := plug.(*cephfsPlugin).newCleanerInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		t.Errorf("Got a nil Cleaner")
	}
	if err := cleaner.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
}
// Test the case where the 'ready' file has been created and the pod volume dir
// is a mountpoint. Mount should not be called.
func TestPluginIdempotent(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid2")
		testVolumeName = "test_volume_name"
		testNamespace  = "test_secret_namespace"
		testName       = "test_secret_name"

		volumeSpec    = volumeSpec(testVolumeName, testName)
		secret        = secret(testNamespace, testName)
		client        = testclient.NewSimpleFake(&secret)
		pluginMgr     = volume.VolumePluginMgr{}
		rootDir, host = newTestHost(t, client)
	)

	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
	plugin, err := pluginMgr.FindPluginByName(secretPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}

	podVolumeDir := fmt.Sprintf("%v/pods/test_pod_uid2/volumes/kubernetes.io~secret/test_volume_name", rootDir)
	podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid2/plugins/kubernetes.io~secret/test_volume_name", rootDir)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID}}
	mounter := host.GetMounter().(*mount.FakeMounter)
	mounter.MountPoints = []mount.MountPoint{
		{
			Path: podVolumeDir,
		},
	}
	util.SetReady(podMetadataDir)

	builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder")
	}

	volumePath := builder.GetPath()
	err = builder.SetUp()
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}

	if len(mounter.Log) != 0 {
		t.Errorf("Unexpected calls made to mounter: %v", mounter.Log)
	}

	if _, err := os.Stat(volumePath); err != nil {
		if !os.IsNotExist(err) {
			t.Errorf("SetUp() failed unexpectedly: %v", err)
		}
	} else {
		t.Errorf("volume path should not exist: %v", volumePath)
	}
}
func getContainerCoordinates(request *restful.Request) (namespace, pod string, uid types.UID, container string) {
	namespace = request.PathParameter("podNamespace")
	pod = request.PathParameter("podID")
	if uidStr := request.PathParameter("uid"); uidStr != "" {
		uid = types.UID(uidStr)
	}
	container = request.PathParameter("containerName")
	return
}
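// Hedged sketch of how these coordinate helpers are typically wired up with
// go-restful; the exact route path and handler name here are assumptions for
// illustration, not taken from this file.
//
//	ws := new(restful.WebService)
//	ws.Route(ws.GET("/containerLogs/{podNamespace}/{podID}/{containerName}").
//		To(s.getContainerLogs))
//	// A request matching the route makes "podNamespace", "podID", and
//	// "containerName" available via request.PathParameter, which is what
//	// getPodCoordinates and getContainerCoordinates read back out.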
func TestNewCleaner(t *testing.T) {
	assert := assert.New(t)

	p := flockerPlugin{}

	cleaner, err := p.NewCleaner("", types.UID(""))
	assert.Nil(cleaner)
	assert.NoError(err)
}
func (nc *NodeController) recordNodeEvent(nodeName string, reason string, event string) {
	ref := &api.ObjectReference{
		Kind:      "Node",
		Name:      nodeName,
		UID:       types.UID(nodeName),
		Namespace: "",
	}
	glog.V(2).Infof("Recording %s event message for node %s", event, nodeName)
	nc.recorder.Eventf(ref, reason, "Node %s event: %s", nodeName, event)
}
func TestPlugin(t *testing.T) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("fake", nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/host-path")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &api.Volume{
		Name:         "vol1",
		VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/vol1"}},
	}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	builder, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder")
	}

	path := builder.GetPath()
	if path != "/vol1" {
		t.Errorf("Got unexpected path: %s", path)
	}

	if err := builder.SetUp(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}

	cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"))
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		t.Errorf("Got a nil Cleaner")
	}
	if err := cleaner.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
}
func (nc *NodeController) recordNodeStatusChange(node *api.Node, newStatus string) {
	ref := &api.ObjectReference{
		Kind:      "Node",
		Name:      node.Name,
		UID:       types.UID(node.Name),
		Namespace: "",
	}
	glog.V(2).Infof("Recording status change %s event message for node %s", newStatus, node.Name)
	// TODO: This requires a transaction, either both node status is updated
	// and event is recorded or neither should happen, see issue #6055.
	nc.recorder.Eventf(ref, newStatus, "Node %s status is now: %s", node.Name, newStatus)
}
// Test the case where the plugin's ready file exists, but the volume dir is not a
// mountpoint, which is the state the system will be in after reboot. The dir
// should be mounted and the secret data written to it.
func TestPluginReboot(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid3")
		testVolumeName = "test_volume_name"
		testNamespace  = "test_secret_namespace"
		testName       = "test_secret_name"

		volumeSpec    = volumeSpec(testVolumeName, testName)
		secret        = secret(testNamespace, testName)
		client        = testclient.NewSimpleFake(&secret)
		pluginMgr     = volume.VolumePluginMgr{}
		rootDir, host = newTestHost(t, client)
	)

	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
	plugin, err := pluginMgr.FindPluginByName(secretPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}

	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID}}
	builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder")
	}

	podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid3/plugins/kubernetes.io~secret/test_volume_name", rootDir)
	util.SetReady(podMetadataDir)
	volumePath := builder.GetPath()
	if !strings.HasSuffix(volumePath, "pods/test_pod_uid3/volumes/kubernetes.io~secret/test_volume_name") {
		t.Errorf("Got unexpected path: %s", volumePath)
	}

	err = builder.SetUp()
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	doTestSecretDataInVolume(volumePath, secret, t)
	doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	lun := 0
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: "pvA",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{
				FC: &api.FCVolumeSource{
					TargetWWNs: []string{"some_wwn"},
					FSType:     "ext4",
					Lun:        &lun,
				},
			},
			ClaimRef: &api.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: api.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: api.PersistentVolumeClaimStatus{
			Phase: api.ClaimBound,
		},
	}

	o := testclient.NewObjects(api.Scheme, api.Scheme)
	o.Add(pv)
	o.Add(claim)

	client := &testclient.Fake{}
	client.AddReactor("*", "*", testclient.ObjectReaction(o, testapi.Default.RESTMapper()))

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
	plug, _ := plugMgr.FindPluginByName(fcPluginName)

	// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})

	if !builder.IsReadOnly() {
		t.Errorf("Expected true for builder.IsReadOnly")
	}
}
func verifyPackUnpack(t *testing.T, podNamespace, podUID, podName, containerName string) {
	container := &api.Container{Name: containerName}
	hasher := adler32.New()
	util.DeepHashObject(hasher, *container)
	computedHash := uint64(hasher.Sum32())
	podFullName := fmt.Sprintf("%s_%s", podName, podNamespace)
	_, name := BuildDockerName(KubeletContainerName{podFullName, types.UID(podUID), container.Name}, container)
	returned, hash, err := ParseDockerName(name)
	if err != nil {
		t.Errorf("Failed to parse Docker container name %q: %v", name, err)
	}
	if podFullName != returned.PodFullName || podUID != string(returned.PodUID) || containerName != returned.ContainerName || computedHash != hash {
		t.Errorf("For (%s, %s, %s, %d), unpacked (%s, %s, %s, %d)", podFullName, podUID, containerName, computedHash, returned.PodFullName, returned.PodUID, returned.ContainerName, hash)
	}
}
func drainWorkers(podWorkers *podWorkers, numPods int) {
	for {
		stillWorking := false
		podWorkers.podLock.Lock()
		for i := 0; i < numPods; i++ {
			// Match the decimal-string UIDs used by the tests above;
			// string(i) would yield a single-rune string instead.
			if podWorkers.isWorking[types.UID(strconv.Itoa(i))] {
				stillWorking = true
			}
		}
		podWorkers.podLock.Unlock()
		if !stillWorking {
			break
		}
		time.Sleep(50 * time.Millisecond)
	}
}
// toRuntimeContainer converts docker.APIContainers to kubecontainer.Container.
func toRuntimeContainer(c *docker.APIContainers) (*kubecontainer.Container, error) {
	if c == nil {
		return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container")
	}

	dockerName, hash, err := getDockerContainerNameInfo(c)
	if err != nil {
		return nil, err
	}

	return &kubecontainer.Container{
		ID:      types.UID(c.ID),
		Name:    dockerName.ContainerName,
		Image:   c.Image,
		Hash:    hash,
		Created: c.Created,
	}, nil
}
// apiPodToRuntimePod converts an api.Pod to a kubelet/container.Pod. We save
// this for later reconstruction of the kubelet/container.Pod, such as in
// GetPods().
func apiPodToRuntimePod(uuid string, pod *api.Pod) *kubecontainer.Pod {
	p := &kubecontainer.Pod{
		ID:        pod.UID,
		Name:      pod.Name,
		Namespace: pod.Namespace,
	}
	for i := range pod.Spec.Containers {
		c := &pod.Spec.Containers[i]
		p.Containers = append(p.Containers, &kubecontainer.Container{
			ID:      types.UID(buildContainerID(&containerID{uuid, c.Name})),
			Name:    c.Name,
			Image:   c.Image,
			Hash:    kubecontainer.HashContainer(c),
			Created: time.Now().Unix(),
		})
	}
	return p
}
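// Illustrative only (hypothetical values, not from the original source): for a
// pod with a single container, apiPodToRuntimePod copies the pod identity and
// derives each container ID from the runtime uuid and container name via
// buildContainerID.
//
//	pod := &api.Pod{
//		ObjectMeta: api.ObjectMeta{UID: "pod-uid", Name: "web", Namespace: "default"},
//		Spec:       api.PodSpec{Containers: []api.Container{{Name: "nginx", Image: "nginx"}}},
//	}
//	p := apiPodToRuntimePod("run-uuid", pod)
//	// p.ID == "pod-uid", p.Name == "web", len(p.Containers) == 1
//	// p.Containers[0].Name == "nginx"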
func TestNewBuilderClaimNotBound(t *testing.T) {
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: "pvC",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{
				GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
			},
		},
	}
	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "claimC",
			Namespace: "nsA",
		},
	}
	podVolume := api.VolumeSource{
		PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
			ReadOnly:  false,
			ClaimName: "claimC",
		},
	}
	o := testclient.NewObjects(api.Scheme, api.Scheme)
	o.Add(pv)
	o.Add(claim)
	client := &testclient.Fake{}
	client.AddReactor("*", "*", testclient.ObjectReaction(o, api.RESTMapper))

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(testProbeVolumePlugins(), newTestHost(t, client))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/persistent-claim")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &volume.Spec{Volume: &api.Volume{VolumeSource: podVolume}}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	// An error is expected here, so the error value is deliberately discarded;
	// the test only asserts that no builder is returned.
	builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})
	if builder != nil {
		t.Errorf("Expected a nil builder if the claim wasn't bound")
	}
}
func CreateValidPod(name, namespace string) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       types.UID(name), // for the purpose of testing, this is unique enough
			Name:      name,
			Namespace: namespace,
		},
		Spec: api.PodSpec{
			RestartPolicy: api.RestartPolicyAlways,
			DNSPolicy:     api.DNSClusterFirst,
			Containers: []api.Container{
				{
					Name:            "ctr",
					Image:           "image",
					ImagePullPolicy: "IfNotPresent",
					SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
				},
			},
		},
	}
}
func TestSetUpAtInternal(t *testing.T) {
	const dir = "dir"
	mockPath := "expected-to-be-set-properly" // package var
	expectedPath := mockPath

	assert := assert.New(t)

	plugMgr, rootDir := newInitializedVolumePlugMgr(t)
	if rootDir != "" {
		defer os.RemoveAll(rootDir)
	}
	plug, err := plugMgr.FindPluginByName(flockerPluginName)
	assert.NoError(err)

	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	b := flockerBuilder{flocker: &flocker{pod: pod, plugin: plug.(*flockerPlugin)}}
	b.client = newMockFlockerClient("dataset-id", "primary-uid", mockPath)

	assert.NoError(b.SetUpAt(dir))
	assert.Equal(expectedPath, b.flocker.path)
}
func TestPluginBackCompat(t *testing.T) {
	basePath := "/tmp/fake"
	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath)

	spec := &api.Volume{
		Name: "vol1",
	}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	builder, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{RootContext: ""})
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder")
	}

	volPath := builder.GetPath()
	if volPath != path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/vol1") {
		t.Errorf("Got unexpected path: %s", volPath)
	}
}
func TestToRuntimeContainer(t *testing.T) {
	original := &docker.APIContainers{
		ID:      "ab2cdf",
		Image:   "bar_image",
		Created: 12345,
		Names:   []string{"/k8s_bar.5678_foo_ns_1234_42"},
	}
	expected := &kubecontainer.Container{
		ID:      types.UID("ab2cdf"),
		Name:    "bar",
		Image:   "bar_image",
		Hash:    0x5678,
		Created: 12345,
	}

	actual, err := toRuntimeContainer(original)
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	if !reflect.DeepEqual(expected, actual) {
		t.Errorf("expected %#v, got %#v", expected, actual)
	}
}
func TestRespectsExistingMount(t *testing.T) {
	ns := "myns"
	tokenName := "token-name"
	serviceAccountName := DefaultServiceAccountName
	serviceAccountUID := "12345"

	expectedVolumeMount := api.VolumeMount{
		Name:      "my-custom-mount",
		ReadOnly:  false,
		MountPath: DefaultAPITokenMountPath,
	}

	admit := NewServiceAccount(nil)
	admit.MountServiceAccountToken = true
	admit.RequireAPIToken = true

	// Add the default service account for the ns with a token into the cache
	admit.serviceAccounts.Add(&api.ServiceAccount{
		ObjectMeta: api.ObjectMeta{
			Name:      serviceAccountName,
			Namespace: ns,
			UID:       types.UID(serviceAccountUID),
		},
		Secrets: []api.ObjectReference{
			{Name: tokenName},
		},
	})
	// Add a token for the service account into the cache
	admit.secrets.Add(&api.Secret{
		ObjectMeta: api.ObjectMeta{
			Name:      tokenName,
			Namespace: ns,
			Annotations: map[string]string{
				api.ServiceAccountNameKey: serviceAccountName,
				api.ServiceAccountUIDKey:  serviceAccountUID,
			},
		},
		Type: api.SecretTypeServiceAccountToken,
		Data: map[string][]byte{
			api.ServiceAccountTokenKey: []byte("token-data"),
		},
	})

	// Define a pod with a container that already mounts a volume at the API token path.
	// Admission should respect that mount. Additionally, no volume should be created
	// if no container is going to use it.
	pod := &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					VolumeMounts: []api.VolumeMount{
						expectedVolumeMount,
					},
				},
			},
		},
	}
	attrs := admission.NewAttributesRecord(pod, "Pod", ns, "myname", string(api.ResourcePods), "", admission.Create, nil)
	err := admit.Admit(attrs)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}

	if pod.Spec.ServiceAccountName != DefaultServiceAccountName {
		t.Errorf("Expected service account %s assigned, got %s", DefaultServiceAccountName, pod.Spec.ServiceAccountName)
	}
	if len(pod.Spec.Volumes) != 0 {
		t.Fatalf("Expected 0 volumes (shouldn't create a volume for a secret we don't need), got %d", len(pod.Spec.Volumes))
	}
	if len(pod.Spec.Containers[0].VolumeMounts) != 1 {
		t.Fatalf("Expected 1 volume mount, got %d", len(pod.Spec.Containers[0].VolumeMounts))
	}
	if !reflect.DeepEqual(expectedVolumeMount, pod.Spec.Containers[0].VolumeMounts[0]) {
		t.Fatalf("Expected\n\t%#v\ngot\n\t%#v", expectedVolumeMount, pod.Spec.Containers[0].VolumeMounts[0])
	}
}