func doTestPluginMountUnmount(t *testing.T, spec *volume.Spec, tmpDir string) {
	plugMgr := volume.VolumePluginMgr{}
	installPluginUnderTest(t, "kubernetes.io", "fakeMounter", tmpDir, execScriptTempl2, nil)
	plugMgr.InitPlugins(ProbeVolumePlugins(tmpDir), volume.NewFakeVolumeHost(tmpDir, nil, nil))
	plugin, err := plugMgr.FindPluginByName("kubernetes.io/fakeMounter")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	fake := &mount.FakeMounter{}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	builder, err := plugin.(*flexVolumePlugin).newBuilderInternal(spec, pod, &flexVolumeUtil{}, fake, exec.New(), "")
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Fatalf("Got a nil Builder")
	}
	volumePath := builder.GetPath()
	expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~fakeMounter/vol1", tmpDir)
	if volumePath != expectedPath {
		t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, volumePath)
	}
	if err := builder.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	t.Logf("Setup successful")
	if builder.(*flexVolumeBuilder).readOnly {
		t.Errorf("The volume source should not be read-only and it is.")
	}
	cleaner, err := plugin.(*flexVolumePlugin).newCleanerInternal("vol1", types.UID("poduid"), &flexVolumeUtil{}, fake, exec.New())
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		t.Fatalf("Got a nil Cleaner")
	}
	if err := cleaner.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
}
func TestMounterAndUnmounterTypeAssert(t *testing.T) {
	tmpDir, err := ioutil.TempDir(os.TempDir(), "azurefileTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
	plug, err := plugMgr.FindPluginByName("kubernetes.io/azure-file")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &api.Volume{
		Name: "vol1",
		VolumeSource: api.VolumeSource{
			AzureFile: &api.AzureFileVolumeSource{
				SecretName: "secret",
				ShareName:  "share",
			},
		},
	}
	fake := &mount.FakeMounter{}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.(*azureFilePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake)
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if _, ok := mounter.(volume.Unmounter); ok {
		t.Errorf("Volume Mounter can be type-asserted to Unmounter")
	}
	unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if _, ok := unmounter.(volume.Mounter); ok {
		t.Errorf("Volume Unmounter can be type-asserted to Mounter")
	}
}
func TestForgetNonExistingPodWorkers(t *testing.T) {
	podWorkers, _ := createPodWorkers()
	numPods := 20
	for i := 0; i < numPods; i++ {
		// Use decimal string UIDs ("0".."19") rather than int-to-string rune conversions.
		podWorkers.UpdatePod(newPod(strconv.Itoa(i), "name"), nil, func() {})
	}
	drainWorkers(podWorkers, numPods)
	if len(podWorkers.podUpdates) != numPods {
		t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates))
	}
	desiredPods := map[types.UID]empty{}
	desiredPods[types.UID("2")] = empty{}
	desiredPods[types.UID("14")] = empty{}
	podWorkers.ForgetNonExistingPodWorkers(desiredPods)
	if len(podWorkers.podUpdates) != 2 {
		t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates))
	}
	if _, exists := podWorkers.podUpdates[types.UID("2")]; !exists {
		t.Errorf("No updates channel for pod 2")
	}
	if _, exists := podWorkers.podUpdates[types.UID("14")]; !exists {
		t.Errorf("No updates channel for pod 14")
	}
	podWorkers.ForgetNonExistingPodWorkers(map[types.UID]empty{})
	if len(podWorkers.podUpdates) != 0 {
		t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates))
	}
}
func TestStatusRecordRejection(t *testing.T) {
	now := nowFn()
	nowFn = func() unversioned.Time { return now }
	p := &fakePlugin{}
	c := testclient.NewSimpleFake(&routeapi.Route{})
	admitter := NewStatusAdmitter(p, c, "test")
	admitter.RecordRouteRejection(&routeapi.Route{
		ObjectMeta: kapi.ObjectMeta{Name: "route1", Namespace: "default", UID: types.UID("uid1")},
		Spec:       routeapi.RouteSpec{Host: "route1.test.local"},
	}, "Failed", "generic error")
	if len(c.Actions()) != 1 {
		t.Fatalf("unexpected actions: %#v", c.Actions())
	}
	action := c.Actions()[0]
	if action.GetVerb() != "update" || action.GetResource() != "routes" || action.GetSubresource() != "status" {
		t.Fatalf("unexpected action: %#v", action)
	}
	obj := c.Actions()[0].(ktestclient.UpdateAction).GetObject().(*routeapi.Route)
	if len(obj.Status.Ingress) != 1 || obj.Status.Ingress[0].Host != "route1.test.local" {
		t.Fatalf("expected route reset: %#v", obj)
	}
	condition := obj.Status.Ingress[0].Conditions[0]
	if condition.LastTransitionTime == nil || *condition.LastTransitionTime != now || condition.Status != kapi.ConditionFalse || condition.Reason != "Failed" || condition.Message != "generic error" {
		t.Fatalf("unexpected condition: %#v", condition)
	}
	if v, ok := admitter.expected.Peek(types.UID("uid1")); !ok || !reflect.DeepEqual(v, now.Time) {
		t.Fatalf("expected cached rejection time to equal now: %#v", v)
	}
}
func checkResult(t *testing.T, err error, c *testclient.Fake, admitter *StatusAdmitter, targetHost string, targetObjTime unversioned.Time, targetCachedTime *time.Time, ingressInd int, actionInd int) *routeapi.Route {
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(c.Actions()) != actionInd+1 {
		t.Fatalf("unexpected actions: %#v", c.Actions())
	}
	action := c.Actions()[actionInd]
	if action.GetVerb() != "update" || action.GetResource() != "routes" || action.GetSubresource() != "status" {
		t.Fatalf("unexpected action: %#v", action)
	}
	obj := action.(ktestclient.UpdateAction).GetObject().(*routeapi.Route)
	if len(obj.Status.Ingress) != ingressInd+1 || obj.Status.Ingress[ingressInd].Host != targetHost {
		t.Fatalf("expected route reset: expected %q / actual %q -- %#v", targetHost, obj.Status.Ingress[ingressInd].Host, obj)
	}
	condition := obj.Status.Ingress[ingressInd].Conditions[0]
	if condition.LastTransitionTime == nil || *condition.LastTransitionTime != targetObjTime || condition.Status != kapi.ConditionTrue || condition.Reason != "" {
		t.Fatalf("%s: unexpected condition: %#v", targetHost, condition)
	}
	if targetCachedTime == nil {
		if v, ok := admitter.expected.Peek(types.UID("uid1")); ok {
			t.Fatalf("expected empty time: %#v", v)
		}
	} else {
		if v, ok := admitter.expected.Peek(types.UID("uid1")); !ok || !reflect.DeepEqual(v, *targetCachedTime) {
			t.Fatalf("did not record last modification time: %#v %#v", admitter.expected, v)
		}
	}
	return obj
}
func TestDefaultResourceFromIngress(te *testing.T) {
	var (
		is  = assert.New(te)
		m   = NewCache()
		ing = &extensions.Ingress{
			ObjectMeta: api.ObjectMeta{Name: "ingress", Namespace: "test", UID: types.UID("one")},
			Spec: extensions.IngressSpec{
				Backend: &extensions.IngressBackend{
					ServiceName: "service",
					ServicePort: intstr.FromString("web"),
				},
			},
		}
		svc = &api.Service{
			ObjectMeta: api.ObjectMeta{Name: "service", Namespace: "test", UID: types.UID("two")},
			Spec: api.ServiceSpec{
				Type:      api.ServiceTypeClusterIP,
				ClusterIP: "1.2.3.4",
				Ports: []api.ServicePort{
					{Name: "web", Port: 80, TargetPort: intstr.FromString("http")},
				},
			},
		}
		end = &api.Endpoints{
			ObjectMeta: api.ObjectMeta{Name: "service", Namespace: "test", UID: types.UID("three")},
			Subsets: []api.EndpointSubset{
				{
					Addresses: []api.EndpointAddress{
						{IP: "10.11.12.13"},
						{IP: "10.20.21.23"},
					},
					Ports: []api.EndpointPort{
						{Name: "web", Port: 8080, Protocol: api.ProtocolTCP},
					},
				},
			},
		}
	)

	if testing.Verbose() {
		logger.Configure("debug", "[romulus-test] ", os.Stdout)
		defer logger.SetLevel("error")
	}

	m.SetServiceStore(cache.NewStore(cache.MetaNamespaceKeyFunc))
	m.SetEndpointsStore(cache.NewStore(cache.MetaNamespaceKeyFunc))
	m.endpoints.Add(end)
	m.service.Add(svc)

	list := resourcesFromIngress(m, ing)
	te.Logf("Default ResourceList: %v", list)
	is.True(len(list) > 0, "ResourceList should be non-zero")
	ma := list.Map()
	rsc, ok := ma["test.service.web"]
	if is.True(ok, "'test.service.web' not created: %v", list) {
		is.False(rsc.NoServers(), "%v should have servers", rsc)
	}
}
func doTestPlugin(t *testing.T, spec *volume.Spec) {
	tmpDir, err := utiltesting.MkTmpdir("rbd_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
	plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	fdm := NewFakeDiskManager()
	defer fdm.Cleanup()
	mounter, err := plug.(*rbdPlugin).newMounterInternal(spec, types.UID("poduid"), fdm, &mount.FakeMounter{}, "secrets")
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Fatal("Got a nil Mounter")
	}
	path := mounter.GetPath()
	expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~rbd/vol1", tmpDir)
	if path != expectedPath {
		t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
	}
	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	unmounter, err := plug.(*rbdPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fdm, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Fatal("Got a nil Unmounter")
	}
	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
}
func TestPlugin(t *testing.T) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), newTestHost(t))
	plug, err := plugMgr.FindPluginByName("kubernetes.io/git-repo")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &api.Volume{
		Name: "vol1",
		VolumeSource: api.VolumeSource{
			GitRepo: &api.GitRepoVolumeSource{
				Repository: "https://github.com/GoogleCloudPlatform/kubernetes.git",
				Revision:   "2a30ce65c5ab586b98916d83385c5983edd353a1",
			},
		},
	}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	builder, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{RootContext: ""})
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Fatalf("Got a nil Builder")
	}
	path := builder.GetPath()
	if !strings.HasSuffix(path, "pods/poduid/volumes/kubernetes.io~git-repo/vol1") {
		t.Errorf("Got unexpected path: %s", path)
	}
	testSetUp(plug, builder, t)
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"))
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		t.Fatalf("Got a nil Cleaner")
	}
	if err := cleaner.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
}
func doTestPlugin(t *testing.T, spec *volume.Spec) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
	plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	builder, err := plug.(*rbdPlugin).newBuilderInternal(spec, types.UID("poduid"), &fakeDiskManager{}, &mount.FakeMounter{}, "secrets")
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Fatalf("Got a nil Builder")
	}
	path := builder.GetPath()
	if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~rbd/vol1" {
		t.Errorf("Got unexpected path: %s", path)
	}
	if err := builder.SetUp(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	cleaner, err := plug.(*rbdPlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakeDiskManager{}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		t.Fatalf("Got a nil Cleaner")
	}
	if err := cleaner.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
}
// doProbe probes the container once and records the result.
// Returns whether the worker should continue.
func doProbe(m *manager, w *worker) (keepGoing bool) {
	defer util.HandleCrash(func(_ interface{}) { keepGoing = true })

	status, ok := m.statusManager.GetPodStatus(w.pod.UID)
	if !ok {
		// Either the pod has not been created yet, or it was already deleted.
		glog.V(3).Infof("No status for pod: %v", kubeutil.FormatPodName(w.pod))
		return true
	}

	// Worker should terminate if pod is terminated.
	if status.Phase == api.PodFailed || status.Phase == api.PodSucceeded {
		glog.V(3).Infof("Pod %v %v, exiting probe worker", kubeutil.FormatPodName(w.pod), status.Phase)
		return false
	}

	c, ok := api.GetContainerStatus(status.ContainerStatuses, w.container.Name)
	if !ok {
		// Either the container has not been created yet, or it was deleted.
		glog.V(3).Infof("Non-existent container probed: %v - %v", kubeutil.FormatPodName(w.pod), w.container.Name)
		return true // Wait for more information.
	}

	// Compare against the ID with the runtime prefix stripped, since that is
	// the form we store; otherwise the IDs would never match. Drop any cached
	// readiness for the previous container instance.
	newContainerID := types.UID(kubecontainer.TrimRuntimePrefix(c.ContainerID))
	if w.containerID != newContainerID {
		if w.containerID != "" {
			m.readinessCache.removeReadiness(string(w.containerID))
		}
		w.containerID = newContainerID
	}

	if c.State.Running == nil {
		glog.V(3).Infof("Non-running container probed: %v - %v", kubeutil.FormatPodName(w.pod), w.container.Name)
		m.readinessCache.setReadiness(string(w.containerID), false)
		// Abort if the container will not be restarted.
		return c.State.Terminated == nil || w.pod.Spec.RestartPolicy != api.RestartPolicyNever
	}

	if int64(time.Since(c.State.Running.StartedAt.Time).Seconds()) < w.spec.InitialDelaySeconds {
		// Readiness defaults to false during the initial delay.
		m.readinessCache.setReadiness(string(w.containerID), false)
		return true
	}

	// TODO: Move error handling out of prober.
	result, _ := m.prober.ProbeReadiness(w.pod, status, w.container, string(w.containerID))
	if result != probe.Unknown {
		m.readinessCache.setReadiness(string(w.containerID), result != probe.Failure)
	}
	return true
}
func TestGetPods(t *testing.T) {
	manager, fakeDocker := newTestDockerManager()
	dockerContainers := []*docker.Container{
		{
			ID:   "1111",
			Name: "/k8s_foo_qux_new_1234_42",
		},
		{
			ID:   "2222",
			Name: "/k8s_bar_qux_new_1234_42",
		},
		{
			ID:   "3333",
			Name: "/k8s_bar_jlk_wen_5678_42",
		},
	}

	// Convert the docker containers. This does not affect the test coverage
	// because the conversion is tested separately in convert_test.go.
	containers := make([]*kubecontainer.Container, len(dockerContainers))
	for i := range containers {
		c, err := toRuntimeContainer(&docker.APIContainers{
			ID:    dockerContainers[i].ID,
			Names: []string{dockerContainers[i].Name},
		})
		if err != nil {
			t.Fatalf("unexpected error %v", err)
		}
		containers[i] = c
	}

	expected := []*kubecontainer.Pod{
		{
			ID:         types.UID("1234"),
			Name:       "qux",
			Namespace:  "new",
			Containers: []*kubecontainer.Container{containers[0], containers[1]},
		},
		{
			ID:         types.UID("5678"),
			Name:       "jlk",
			Namespace:  "wen",
			Containers: []*kubecontainer.Container{containers[2]},
		},
	}

	fakeDocker.SetFakeRunningContainers(dockerContainers)
	actual, err := manager.GetPods(false)
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	if !verifyPods(expected, actual) {
		t.Errorf("expected %#v, got %#v", expected, actual)
	}
}
func TestStatusRecordRejectionConflict(t *testing.T) {
	now := nowFn()
	nowFn = func() unversioned.Time { return now }
	touched := unversioned.Time{Time: now.Add(-time.Minute)}
	p := &fakePlugin{}
	c := testclient.NewSimpleFake(&routeapi.Route{ObjectMeta: kapi.ObjectMeta{Name: "route1", Namespace: "default", UID: types.UID("uid1")}})
	c.PrependReactor("update", "routes", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		if action.GetSubresource() != "status" {
			return false, nil, nil
		}
		return true, nil, errors.NewConflict(kapi.Resource("Route"), "route1", nil)
	})
	admitter := NewStatusAdmitter(p, c, "test")
	admitter.RecordRouteRejection(&routeapi.Route{
		ObjectMeta: kapi.ObjectMeta{Name: "route1", Namespace: "default", UID: types.UID("uid1")},
		Spec:       routeapi.RouteSpec{Host: "route1.test.local"},
		Status: routeapi.RouteStatus{
			Ingress: []routeapi.RouteIngress{
				{
					Host:       "route2.test.local",
					RouterName: "test",
					Conditions: []routeapi.RouteIngressCondition{
						{
							Type:               routeapi.RouteAdmitted,
							Status:             kapi.ConditionFalse,
							LastTransitionTime: &touched,
						},
					},
				},
			},
		},
	}, "Failed", "generic error")
	if len(c.Actions()) != 1 {
		t.Fatalf("unexpected actions: %#v", c.Actions())
	}
	action := c.Actions()[0]
	if action.GetVerb() != "update" || action.GetResource().Resource != "routes" || action.GetSubresource() != "status" {
		t.Fatalf("unexpected action: %#v", action)
	}
	obj := c.Actions()[0].(core.UpdateAction).GetObject().(*routeapi.Route)
	if len(obj.Status.Ingress) != 1 || obj.Status.Ingress[0].Host != "route1.test.local" {
		t.Fatalf("expected route reset: %#v", obj)
	}
	condition := obj.Status.Ingress[0].Conditions[0]
	if condition.LastTransitionTime == nil || *condition.LastTransitionTime != now || condition.Status != kapi.ConditionFalse || condition.Reason != "Failed" || condition.Message != "generic error" {
		t.Fatalf("unexpected condition: %#v", condition)
	}
	if v, ok := admitter.expected.Peek(types.UID("uid1")); ok {
		t.Fatalf("expected empty time: %#v", v)
	}
}
func doTestPluginAttachDetach(t *testing.T, spec *volume.Spec, tmpDir string) {
	plugMgr := volume.VolumePluginMgr{}
	installPluginUnderTest(t, "kubernetes.io", "fakeAttacher", tmpDir, execScriptTempl1, nil)
	plugMgr.InitPlugins(ProbeVolumePlugins(tmpDir), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
	plugin, err := plugMgr.FindPluginByName("kubernetes.io/fakeAttacher")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	fake := &mount.FakeMounter{}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	secretMap := make(map[string]string)
	secretMap["flexsecret"] = base64.StdEncoding.EncodeToString([]byte("foo"))
	mounter, err := plugin.(*flexVolumePlugin).newMounterInternal(spec, pod, &flexVolumeUtil{}, fake, exec.New(), secretMap)
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Fatalf("Got a nil Mounter")
	}
	volumePath := mounter.GetPath()
	expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~fakeAttacher/vol1", tmpDir)
	if volumePath != expectedPath {
		t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, volumePath)
	}
	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	t.Logf("Setup successful")
	if mounter.(*flexVolumeMounter).readOnly {
		t.Errorf("The volume source should not be read-only and it is.")
	}
	if len(fake.Log) != 1 {
		t.Errorf("Mount was not called exactly one time. It was called %d times.", len(fake.Log))
	} else {
		if fake.Log[0].Action != mount.FakeActionMount {
			t.Errorf("Unexpected mounter action: %#v", fake.Log[0])
		}
	}
	fake.ResetLog()
	unmounter, err := plugin.(*flexVolumePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &flexVolumeUtil{}, fake, exec.New())
	// The original text was truncated here; the remainder is reconstructed
	// from the teardown pattern of doTestPluginMountUnmount above.
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Fatalf("Got a nil Unmounter")
	}
	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
}
func TestStatusBackoffOnConflict(t *testing.T) {
	now := unversioned.Now()
	nowFn = func() unversioned.Time { return now }
	touched := unversioned.Time{Time: now.Add(-time.Minute)}
	p := &fakePlugin{}
	c := testclient.NewSimpleFake(&(errors.NewConflict(kapi.Resource("Route"), "route1", nil).(*errors.StatusError).ErrStatus))
	admitter := NewStatusAdmitter(p, c, "test")
	err := admitter.HandleRoute(watch.Added, &routeapi.Route{
		ObjectMeta: kapi.ObjectMeta{Name: "route1", Namespace: "default", UID: types.UID("uid1")},
		Spec:       routeapi.RouteSpec{Host: "route1.test.local"},
		Status: routeapi.RouteStatus{
			Ingress: []routeapi.RouteIngress{
				{
					Host:       "route2.test.local",
					RouterName: "test",
					Conditions: []routeapi.RouteIngressCondition{
						{
							Type:               routeapi.RouteAdmitted,
							Status:             kapi.ConditionFalse,
							LastTransitionTime: &touched,
						},
					},
				},
			},
		},
	})
	if len(c.Actions()) != 1 {
		t.Fatalf("unexpected actions: %#v", c.Actions())
	}
	action := c.Actions()[0]
	if action.GetVerb() != "update" || action.GetResource() != "routes" || action.GetSubresource() != "status" {
		t.Fatalf("unexpected action: %#v", action)
	}
	obj := c.Actions()[0].(ktestclient.UpdateAction).GetObject().(*routeapi.Route)
	if len(obj.Status.Ingress) != 1 || obj.Status.Ingress[0].Host != "route1.test.local" {
		t.Fatalf("expected route reset: %#v", obj)
	}
	condition := obj.Status.Ingress[0].Conditions[0]
	if condition.LastTransitionTime == nil || *condition.LastTransitionTime != now || condition.Status != kapi.ConditionTrue || condition.Reason != "" {
		t.Fatalf("unexpected condition: %#v", condition)
	}
	if err == nil {
		t.Fatalf("expected a conflict error, got none: %#v", admitter.expected)
	}
	if v, ok := admitter.expected.Peek(types.UID("uid1")); !ok || !reflect.DeepEqual(v, time.Time{}) {
		t.Fatalf("expected zero cached time: %#v", v)
	}
}
func createEvent(eventType eventType, selfUID string, owners []string) event {
	var ownerReferences []api.OwnerReference
	for _, owner := range owners {
		ownerReferences = append(ownerReferences, api.OwnerReference{UID: types.UID(owner)})
	}
	return event{
		eventType: eventType,
		obj: &api.Pod{
			ObjectMeta: api.ObjectMeta{
				UID:             types.UID(selfUID),
				OwnerReferences: ownerReferences,
			},
		},
	}
}
func makeIdentity(uid, providerName, providerUserName, userUID, userName string) *api.Identity {
	return &api.Identity{
		ObjectMeta: kapi.ObjectMeta{
			Name: providerName + ":" + providerUserName,
			UID:  types.UID(uid),
		},
		ProviderName:     providerName,
		ProviderUserName: providerUserName,
		User: kapi.ObjectReference{
			UID:  types.UID(userUID),
			Name: userName,
		},
		Extra: map[string]string{},
	}
}
func NewHollowProxyOrDie(
	nodeName string,
	client clientset.Interface,
	endpointsConfig *proxyconfig.EndpointsConfig,
	serviceConfig *proxyconfig.ServiceConfig,
	iptInterface utiliptables.Interface,
	broadcaster record.EventBroadcaster,
	recorder record.EventRecorder,
) *HollowProxy {
	// Create and start a Hollow Proxy.
	config := options.NewProxyConfig()
	config.OOMScoreAdj = util.Int32Ptr(0)
	config.ResourceContainer = ""
	config.NodeRef = &api.ObjectReference{
		Kind:      "Node",
		Name:      nodeName,
		UID:       types.UID(nodeName),
		Namespace: "",
	}
	proxyconfig.NewSourceAPI(
		client.Core().RESTClient(),
		30*time.Second,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"),
	)

	hollowProxy, err := proxyapp.NewProxyServer(client, config, iptInterface, &FakeProxier{}, broadcaster, recorder, nil, "fake")
	if err != nil {
		glog.Fatalf("Error while creating ProxyServer: %v", err)
	}
	return &HollowProxy{
		ProxyServer: hollowProxy,
	}
}
func (d *NodeDescriber) Describe(namespace, name string) (string, error) {
	mc := d.Nodes()
	node, err := mc.Get(name)
	if err != nil {
		return "", err
	}

	var pods []*api.Pod
	allPods, err := d.Pods(namespace).List(labels.Everything(), fields.Everything())
	if err != nil {
		return "", err
	}
	for i := range allPods.Items {
		pod := &allPods.Items[i]
		if pod.Spec.NodeName != name {
			continue
		}
		pods = append(pods, pod)
	}

	var events *api.EventList
	if ref, err := api.GetReference(node); err != nil {
		glog.Errorf("Unable to construct reference to '%#v': %v", node, err)
	} else {
		// TODO: We haven't decided the namespace for Node object yet.
		ref.UID = types.UID(ref.Name)
		events, _ = d.Events("").Search(ref)
	}

	return describeNode(node, pods, events)
}
// ParseDockerName unpacks a container name, returning the pod full name and
// container name we would have used to construct the docker name. If we are
// unable to parse the name, an error is returned.
func ParseDockerName(name string) (dockerName *KubeletContainerName, hash uint64, err error) {
	// For some reason docker appears to be appending '/' to names.
	// If it's there, strip it.
	name = strings.TrimPrefix(name, "/")
	parts := strings.Split(name, "_")
	if len(parts) == 0 || parts[0] != containerNamePrefix {
		err = fmt.Errorf("failed to parse Docker container name %q into parts", name)
		return nil, 0, err
	}
	if len(parts) < 6 {
		// We expect at least 6 fields. We may have more in the future.
		// Anything with fewer fields than this is not something we can manage.
		glog.Warningf("found a container with the %q prefix, but too few fields (%d): %q", containerNamePrefix, len(parts), name)
		err = fmt.Errorf("Docker container name %q has fewer parts than expected %v", name, parts)
		return nil, 0, err
	}

	nameParts := strings.Split(parts[1], ".")
	containerName := nameParts[0]
	if len(nameParts) > 1 {
		hash, err = strconv.ParseUint(nameParts[1], 16, 32)
		if err != nil {
			glog.Warningf("invalid container hash %q in container %q", nameParts[1], name)
		}
	}

	podFullName := parts[2] + "_" + parts[3]
	podUID := types.UID(parts[4])

	return &KubeletContainerName{podFullName, podUID, containerName}, hash, nil
}
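// Illustrative sketch, not part of the original source: a minimal check of
// ParseDockerName against one of the container names used in TestGetPods
// above. The KubeletContainerName field names (PodFullName, PodUID,
// ContainerName) are assumed from the positional literal in ParseDockerName.
func TestParseDockerNameExample(t *testing.T) {
	// Name layout: k8s_<container>[.<hash>]_<podname>_<namespace>_<poduid>_<attempt>
	dockerName, hash, err := ParseDockerName("/k8s_foo_qux_new_1234_42")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if dockerName.PodFullName != "qux_new" || dockerName.PodUID != types.UID("1234") || dockerName.ContainerName != "foo" {
		t.Errorf("unexpected parse result: %#v", dockerName)
	}
	// There is no ".<hash>" suffix on the container field, so hash stays zero.
	if hash != 0 {
		t.Errorf("expected zero hash, got %x", hash)
	}
}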
func TestBuilderAndCleanerTypeAssert(t *testing.T) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
	plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &api.Volume{
		Name: "vol1",
		VolumeSource: api.VolumeSource{
			AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
				VolumeID: "pd",
				FSType:   "ext4",
			},
		},
	}
	builder, err := plug.(*awsElasticBlockStorePlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if _, ok := builder.(volume.Cleaner); ok {
		t.Errorf("Volume Builder can be type-asserted to Cleaner")
	}
	cleaner, err := plug.(*awsElasticBlockStorePlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if _, ok := cleaner.(volume.Builder); ok {
		t.Errorf("Volume Cleaner can be type-asserted to Builder")
	}
}
// TestDependentsRace relies on golang's data race detector to check for
// data races on the dependents field.
func TestDependentsRace(t *testing.T) {
	config := &restclient.Config{}
	config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	metaOnlyClientPool := dynamic.NewClientPool(config, dynamic.LegacyAPIPathResolverFunc)
	config.ContentConfig.NegotiatedSerializer = nil
	clientPool := dynamic.NewClientPool(config, dynamic.LegacyAPIPathResolverFunc)
	podResource := []unversioned.GroupVersionResource{{Version: "v1", Resource: "pods"}}
	gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, podResource)
	if err != nil {
		t.Fatal(err)
	}

	const updates = 100
	owner := &node{dependents: make(map[*node]struct{})}
	ownerUID := types.UID("owner")
	gc.propagator.uidToNode.Write(owner)
	// One goroutine mutates the owner's dependents...
	go func() {
		for i := 0; i < updates; i++ {
			dependent := &node{}
			gc.propagator.addDependentToOwners(dependent, []metatypes.OwnerReference{{UID: ownerUID}})
			gc.propagator.removeDependentFromOwners(dependent, []metatypes.OwnerReference{{UID: ownerUID}})
		}
	}()
	// ...while another iterates over them via the orphan finalizer.
	go func() {
		gc.orphanQueue.Add(owner)
		for i := 0; i < updates; i++ {
			gc.orphanFinalizer()
		}
	}()
}
func TestUpdatePod(t *testing.T) {
	podWorkers, processed := createPodWorkers()

	// Check whether all pod updates will be processed. Use decimal string
	// UIDs/names ("0".."19") rather than int-to-string rune conversions.
	numPods := 20
	for i := 0; i < numPods; i++ {
		for j := i; j < numPods; j++ {
			podWorkers.UpdatePod(newPod(strconv.Itoa(j), strconv.Itoa(i)), nil, func() {})
		}
	}
	drainWorkers(podWorkers, numPods)

	if len(processed) != numPods {
		t.Errorf("Not all pods processed: %v", len(processed))
		return
	}
	for i := 0; i < numPods; i++ {
		uid := types.UID(strconv.Itoa(i))
		if len(processed[uid]) < 1 || len(processed[uid]) > i+1 {
			t.Errorf("Pod %v processed %v times", i, len(processed[uid]))
			continue
		}
		first := 0
		last := len(processed[uid]) - 1
		if processed[uid][first] != "0" {
			t.Errorf("Pod %v: incorrect order %v, %v", i, first, processed[uid][first])
		}
		if processed[uid][last] != strconv.Itoa(i) {
			t.Errorf("Pod %v: incorrect order %v, %v", i, last, processed[uid][last])
		}
	}
}
func TestPluginBackCompat(t *testing.T) {
	basePath, err := utiltesting.MkTmpdir("emptydirTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(basePath)
	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath)

	spec := &api.Volume{
		Name: "vol1",
	}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	builder, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{RootContext: ""})
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Fatalf("Got a nil Builder")
	}

	volPath := builder.GetPath()
	if volPath != path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/vol1") {
		t.Errorf("Got unexpected path: %s", volPath)
	}
}
func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName string) error {
	if len(pod.UID) == 0 {
		hasher := md5.New()
		if isFile {
			fmt.Fprintf(hasher, "host:%s", nodeName)
			fmt.Fprintf(hasher, "file:%s", source)
		} else {
			fmt.Fprintf(hasher, "url:%s", source)
		}
		hash.DeepHashObject(hasher, pod)
		pod.UID = types.UID(hex.EncodeToString(hasher.Sum(nil)))
		glog.V(5).Infof("Generated UID %q pod %q from %s", pod.UID, pod.Name, source)
	}

	pod.Name = generatePodName(pod.Name, nodeName)
	glog.V(5).Infof("Generated Name %q for UID %q from %s", pod.Name, pod.UID, source)

	if pod.Namespace == "" {
		pod.Namespace = kubetypes.NamespaceDefault
	}
	glog.V(5).Infof("Using namespace %q for pod %q from %s", pod.Namespace, pod.Name, source)

	// Set the Host field to indicate this pod is scheduled on the current node.
	pod.Spec.NodeName = nodeName

	pod.ObjectMeta.SelfLink = getSelfLink(pod.Name, pod.Namespace)
	if pod.Annotations == nil {
		pod.Annotations = make(map[string]string)
	}
	// The generated UID is the hash of the file.
	pod.Annotations[kubetypes.ConfigHashAnnotationKey] = string(pod.UID)
	return nil
}
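// Illustrative sketch, not part of the original source: exercises applyDefaults
// for a file-based static pod. The manifest path and node name are arbitrary
// placeholders; the assertions follow directly from the function above.
func TestApplyDefaultsExample(t *testing.T) {
	pod := &api.Pod{}
	pod.Name = "static-web"
	if err := applyDefaults(pod, "/etc/kubernetes/manifests/web.yaml", true, "mynode"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// A deterministic UID is derived from the node name, source, and pod contents.
	if pod.UID == "" {
		t.Errorf("expected a generated UID")
	}
	if pod.Namespace != kubetypes.NamespaceDefault || pod.Spec.NodeName != "mynode" {
		t.Errorf("unexpected defaults: namespace %q, nodeName %q", pod.Namespace, pod.Spec.NodeName)
	}
	// The config hash annotation mirrors the generated UID.
	if pod.Annotations[kubetypes.ConfigHashAnnotationKey] != string(pod.UID) {
		t.Errorf("unexpected config hash annotation: %q", pod.Annotations[kubetypes.ConfigHashAnnotationKey])
	}
}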
// evictPodLogsDirectories evicts all evictable pod logs directories. Pod logs directories
// are evictable if there are no corresponding pods.
func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error {
	osInterface := cgc.manager.osInterface
	if allSourcesReady {
		// Only remove pod logs directories when all sources are ready.
		dirs, err := osInterface.ReadDir(podLogsRootDirectory)
		if err != nil {
			return fmt.Errorf("failed to read podLogsRootDirectory %q: %v", podLogsRootDirectory, err)
		}
		for _, dir := range dirs {
			name := dir.Name()
			podUID := types.UID(name)
			if !cgc.isPodDeleted(podUID) {
				continue
			}
			err := osInterface.RemoveAll(filepath.Join(podLogsRootDirectory, name))
			if err != nil {
				glog.Errorf("Failed to remove pod logs directory %q: %v", name, err)
			}
		}
	}

	// Remove dead container log symlinks.
	// TODO(random-liu): Remove this after cluster logging supports CRI container log path.
	logSymlinks, _ := osInterface.Glob(filepath.Join(legacyContainerLogsDir, fmt.Sprintf("*.%s", legacyLogSuffix)))
	for _, logSymlink := range logSymlinks {
		if _, err := osInterface.Stat(logSymlink); os.IsNotExist(err) {
			err := osInterface.Remove(logSymlink)
			if err != nil {
				glog.Errorf("Failed to remove container log dead symlink %q: %v", logSymlink, err)
			}
		}
	}
	return nil
}
func getContainerInfoFromLabel(labels map[string]string) *labelledContainerInfo {
	var err error
	containerInfo := &labelledContainerInfo{
		PodName:                getStringValueFromLabel(labels, kubernetesPodNameLabel),
		PodNamespace:           getStringValueFromLabel(labels, kubernetesPodNamespaceLabel),
		PodUID:                 types.UID(getStringValueFromLabel(labels, kubernetesPodUIDLabel)),
		Name:                   getStringValueFromLabel(labels, kubernetesContainerNameLabel),
		Hash:                   getStringValueFromLabel(labels, kubernetesContainerHashLabel),
		TerminationMessagePath: getStringValueFromLabel(labels, kubernetesContainerTerminationMessagePathLabel),
	}
	if containerInfo.RestartCount, err = getIntValueFromLabel(labels, kubernetesContainerRestartCountLabel); err != nil {
		logError(containerInfo, kubernetesContainerRestartCountLabel, err)
	}
	if containerInfo.PodDeletionGracePeriod, err = getInt64PointerFromLabel(labels, kubernetesPodDeletionGracePeriodLabel); err != nil {
		logError(containerInfo, kubernetesPodDeletionGracePeriodLabel, err)
	}
	if containerInfo.PodTerminationGracePeriod, err = getInt64PointerFromLabel(labels, kubernetesPodTerminationGracePeriodLabel); err != nil {
		logError(containerInfo, kubernetesPodTerminationGracePeriodLabel, err)
	}
	preStopHandler := &api.Handler{}
	if found, err := getJsonObjectFromLabel(labels, kubernetesContainerPreStopHandlerLabel, preStopHandler); err != nil {
		logError(containerInfo, kubernetesContainerPreStopHandlerLabel, err)
	} else if found {
		containerInfo.PreStopHandler = preStopHandler
	}
	supplyContainerInfoWithOldLabel(labels, containerInfo)
	return containerInfo
}
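// Illustrative sketch, not part of the original source: round-trips a minimal
// label map through getContainerInfoFromLabel. Only the string-valued labels
// are populated here; the numeric and JSON-valued labels are left absent, so
// the corresponding fields keep their zero values (with any parse problems
// handled by logError as above).
func exampleContainerInfoFromLabel() *labelledContainerInfo {
	labels := map[string]string{
		kubernetesPodNameLabel:       "web",
		kubernetesPodNamespaceLabel:  "default",
		kubernetesPodUIDLabel:        "poduid",
		kubernetesContainerNameLabel: "nginx",
	}
	// PodUID comes back as types.UID("poduid"); Name as "nginx", and so on.
	return getContainerInfoFromLabel(labels)
}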
func TestEmptyHostDefaulting(t *testing.T) {
	strategy := NewStrategy(testAllocator{})

	hostlessCreatedRoute := &api.Route{}
	strategy.PrepareForCreate(hostlessCreatedRoute)
	if hostlessCreatedRoute.Spec.Host != "mygeneratedhost.com" {
		t.Fatalf("Expected host to be allocated, got %s", hostlessCreatedRoute.Spec.Host)
	}

	persistedRoute := &api.Route{
		ObjectMeta: kapi.ObjectMeta{
			Namespace:       "foo",
			Name:            "myroute",
			UID:             types.UID("abc"),
			ResourceVersion: "1",
		},
		Spec: api.RouteSpec{
			Host: "myhost.com",
		},
	}
	obj, _ := kapi.Scheme.DeepCopy(persistedRoute)
	hostlessUpdatedRoute := obj.(*api.Route)
	hostlessUpdatedRoute.Spec.Host = ""
	strategy.PrepareForUpdate(hostlessUpdatedRoute, persistedRoute)
	if hostlessUpdatedRoute.Spec.Host != "myhost.com" {
		t.Fatalf("expected empty spec.host to default to existing spec.host, got %s", hostlessUpdatedRoute.Spec.Host)
	}
}
func TestMounterAndUnmounterTypeAssert(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
	plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &v1.Volume{
		Name: "vol1",
		VolumeSource: v1.VolumeSource{
			AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
				VolumeID: "pd",
				FSType:   "ext4",
			},
		},
	}
	mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if _, ok := mounter.(volume.Unmounter); ok {
		t.Errorf("Volume Mounter can be type-asserted to Unmounter")
	}
	unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if _, ok := unmounter.(volume.Mounter); ok {
		t.Errorf("Volume Unmounter can be type-asserted to Mounter")
	}
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	pv := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name: "pvA",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
			},
			ClaimRef: &v1.ObjectReference{
				Name: "claimA",
			},
		},
	}
	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: v1.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase: v1.ClaimBound,
		},
	}
	ep := &v1.Endpoints{
		ObjectMeta: v1.ObjectMeta{
			Namespace: "nsA",
			Name:      "ep",
		},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []v1.EndpointPort{{Name: "foo", Port: 80, Protocol: v1.ProtocolTCP}},
		}},
	}

	client := fake.NewSimpleClientset(pv, claim, ep)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
	plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)

	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: "nsA", UID: types.UID("poduid")}}
	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})

	if !mounter.GetAttributes().ReadOnly {
		t.Errorf("Expected true for mounter.GetAttributes().ReadOnly")
	}
}
// newClaim returns a new claim with given attributes
func newClaim(name, claimUID, capacity, boundToVolume string, phase api.PersistentVolumeClaimPhase, annotations ...string) *api.PersistentVolumeClaim {
	claim := api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:            name,
			Namespace:       testNamespace,
			UID:             types.UID(claimUID),
			ResourceVersion: "1",
		},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany},
			Resources: api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse(capacity),
				},
			},
			VolumeName: boundToVolume,
		},
		Status: api.PersistentVolumeClaimStatus{
			Phase: phase,
		},
	}
	// Make sure api.GetReference(claim) works
	claim.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", name)

	if len(annotations) > 0 {
		claim.Annotations = make(map[string]string)
		for _, a := range annotations {
			claim.Annotations[a] = "yes"
		}
	}
	return &claim
}
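// Illustrative usage sketch, not part of the original source: builds a bound,
// annotated claim with the helper above. The annotation key is a hypothetical
// placeholder; real callers pass controller-specific annotation names, each of
// which is stored with the value "yes".
func newBoundClaimExample() *api.PersistentVolumeClaim {
	return newClaim("claim-1", "uid-1", "1Gi", "volume-1", api.ClaimBound, "example.io/bind-completed")
}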