func TestForgetNonExistingPodWorkers(t *testing.T) {
	podWorkers, _ := createPodWorkers()

	numPods := 20
	for i := 0; i < numPods; i++ {
		podWorkers.UpdatePod(newPod(string(i), "name"), nil, func() {})
	}
	drainWorkers(podWorkers, numPods)

	if len(podWorkers.podUpdates) != numPods {
		t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates))
	}

	desiredPods := map[types.UID]empty{}
	desiredPods[types.UID(2)] = empty{}
	desiredPods[types.UID(14)] = empty{}
	podWorkers.ForgetNonExistingPodWorkers(desiredPods)
	if len(podWorkers.podUpdates) != 2 {
		t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates))
	}
	if _, exists := podWorkers.podUpdates[types.UID(2)]; !exists {
		t.Errorf("No updates channel for pod 2")
	}
	if _, exists := podWorkers.podUpdates[types.UID(14)]; !exists {
		t.Errorf("No updates channel for pod 14")
	}

	podWorkers.ForgetNonExistingPodWorkers(map[types.UID]empty{})
	if len(podWorkers.podUpdates) != 0 {
		t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates))
	}
}
Example #2
func TestPluginLegacy(t *testing.T) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))

	plug, err := plugMgr.FindPluginByName("gce-pd")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plug.Name() != "gce-pd" {
		t.Errorf("Wrong name: %s", plug.Name())
	}
	if plug.CanSupport(&volume.Spec{Name: "foo", VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}}}) {
		t.Errorf("Expected false")
	}

	spec := &api.Volume{VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}}}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	if _, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{""}, nil); err == nil {
		t.Errorf("Expected failiure")
	}

	cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"), nil)
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		t.Errorf("Got a nil Cleaner")
	}
}
Example #3
func TestPlugin(t *testing.T) {
	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir")

	spec := &api.Volume{
		Name:         "vol1",
		VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumDefault}},
	}
	mounter := mount.FakeMounter{}
	mountDetector := fakeMountDetector{}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	builder, err := plug.(*emptyDirPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), pod, &mounter, &mountDetector, volume.VolumeOptions{""})
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder")
	}

	volPath := builder.GetPath()
	if volPath != path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/vol1") {
		t.Errorf("Got unexpected path: %s", volPath)
	}

	if err := builder.SetUp(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volPath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volPath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	if len(mounter.Log) != 0 {
		t.Errorf("Expected 0 mounter calls, got %#v", mounter.Log)
	}
	mounter.ResetLog()

	cleaner, err := plug.(*emptyDirPlugin).newCleanerInternal("vol1", types.UID("poduid"), &mounter, &fakeMountDetector{})
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		t.Errorf("Got a nil Cleaner")
	}

	if err := cleaner.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volPath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volPath)
	} else if !os.IsNotExist(err) {
		t.Errorf("SetUp() failed: %v", err)
	}
	if len(mounter.Log) != 0 {
		t.Errorf("Expected 0 mounter calls, got %#v", mounter.Log)
	}
	mounter.ResetLog()
}
Example #4
func TestPlugin(t *testing.T) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), newTestHost(t))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/git-repo")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &api.Volume{
		Name: "vol1",
		VolumeSource: api.VolumeSource{
			GitRepo: &api.GitRepoVolumeSource{
				Repository: "https://github.com/GoogleCloudPlatform/kubernetes.git",
				Revision:   "2a30ce65c5ab586b98916d83385c5983edd353a1",
			},
		},
	}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	builder, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{""}, mount.New())
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder")
	}

	path := builder.GetPath()
	if !strings.HasSuffix(path, "pods/poduid/volumes/kubernetes.io~git-repo/vol1") {
		t.Errorf("Got unexpected path: %s", path)
	}

	testSetUp(plug, builder, t)
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"), mount.New())
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		t.Errorf("Got a nil Cleaner")
	}

	if err := cleaner.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("SetUp() failed: %v", err)
	}
}
Example #5
func doTestPlugin(t *testing.T, spec *volume.Spec) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	builder, err := plug.(*rbdPlugin).newBuilderInternal(spec, types.UID("poduid"), &fakeDiskManager{}, &mount.FakeMounter{}, "secrets")
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder: %v")
	}

	path := builder.GetPath()
	if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~rbd/vol1" {
		t.Errorf("Got unexpected path: %s", path)
	}

	if err := builder.SetUp(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	cleaner, err := plug.(*rbdPlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakeDiskManager{}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		t.Errorf("Got a nil Cleaner: %v")
	}

	if err := cleaner.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("SetUp() failed: %v", err)
	}
}
func TestPlugin(t *testing.T) {
	plugMgr := volume.PluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), &volume.FakeHost{"/tmp/fake"})

	plug, err := plugMgr.FindPluginByName("kubernetes.io/empty-dir")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &api.Volume{
		Name:   "vol1",
		Source: &api.VolumeSource{EmptyDir: &api.EmptyDir{}},
	}
	builder, err := plug.NewBuilder(spec, types.UID("poduid"))
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder: %v")
	}

	path := builder.GetPath()
	if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~empty-dir/vol1" {
		t.Errorf("Got unexpected path: %s", path)
	}

	if err := builder.SetUp(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"))
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		t.Errorf("Got a nil Cleaner: %v")
	}

	if err := cleaner.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("SetUp() failed: %v", err)
	}
}
Example #7
func TestGetPods(t *testing.T) {
	manager, fakeDocker := newTestDockerManager()
	dockerContainers := []docker.APIContainers{
		{
			ID:    "1111",
			Names: []string{"/k8s_foo_qux_new_1234_42"},
		},
		{
			ID:    "2222",
			Names: []string{"/k8s_bar_qux_new_1234_42"},
		},
		{
			ID:    "3333",
			Names: []string{"/k8s_bar_jlk_wen_5678_42"},
		},
	}

	// Convert the docker containers. This does not affect the test coverage
	// because the conversion is tested separately in convert_test.go
	containers := make([]*kubecontainer.Container, len(dockerContainers))
	for i := range containers {
		c, err := toRuntimeContainer(&dockerContainers[i])
		if err != nil {
			t.Fatalf("unexpected error %v", err)
		}
		containers[i] = c
	}

	expected := []*kubecontainer.Pod{
		{
			ID:         types.UID("1234"),
			Name:       "qux",
			Namespace:  "new",
			Containers: []*kubecontainer.Container{containers[0], containers[1]},
		},
		{
			ID:         types.UID("5678"),
			Name:       "jlk",
			Namespace:  "wen",
			Containers: []*kubecontainer.Container{containers[2]},
		},
	}

	fakeDocker.ContainerList = dockerContainers
	actual, err := manager.GetPods(false)
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	if !verifyPods(expected, actual) {
		t.Errorf("expected %#v, got %#v", expected, actual)
	}
}
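A minimal standalone sketch of the grouping that GetPods is expected to produce, assuming the "/k8s_<container>_<pod>_<namespace>_<uid>_<attempt>" name layout used by the fake containers above. The real code converts through toRuntimeContainer; this only illustrates grouping by the pod UID embedded in the docker name.
package main

import (
	"fmt"
	"strings"
)

func main() {
	names := []string{
		"/k8s_foo_qux_new_1234_42",
		"/k8s_bar_qux_new_1234_42",
		"/k8s_bar_jlk_wen_5678_42",
	}
	// Group container names by the pod UID field of the docker name.
	pods := map[string][]string{}
	for _, n := range names {
		parts := strings.Split(strings.TrimPrefix(n, "/"), "_")
		pods[parts[4]] = append(pods[parts[4]], parts[1])
	}
	fmt.Println(pods) // map[1234:[foo bar] 5678:[bar]]
}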
func exampleManifestAndPod(id string) (v1beta1.ContainerManifest, *api.Pod) {
	hostname := "an-example-host"

	manifest := v1beta1.ContainerManifest{
		Version: "v1beta1",
		ID:      id,
		UUID:    types.UID(id),
		Containers: []v1beta1.Container{
			{
				Name:  "c" + id,
				Image: "foo",
				TerminationMessagePath: "/somepath",
			},
		},
		Volumes: []v1beta1.Volume{
			{
				Name: "host-dir",
				Source: v1beta1.VolumeSource{
					HostDir: &v1beta1.HostPathVolumeSource{"/dir/path"},
				},
			},
		},
	}
	expectedPod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      id + "-" + hostname,
			UID:       types.UID(id),
			Namespace: kubelet.NamespaceDefault,
			SelfLink:  getSelfLink(id+"-"+hostname, kubelet.NamespaceDefault),
		},
		Spec: api.PodSpec{
			Host: hostname,
			Containers: []api.Container{
				{
					Name:  "c" + id,
					Image: "foo",
				},
			},
			Volumes: []api.Volume{
				{
					Name: "host-dir",
					VolumeSource: api.VolumeSource{
						HostPath: &api.HostPathVolumeSource{"/dir/path"},
					},
				},
			},
		},
	}
	return manifest, expectedPod
}
Example #9
// Unpacks a container name, returning the pod full name and container name we would have used to
// construct the docker name. If we are unable to parse the name, an error is returned.
func ParseDockerName(name string) (dockerName *KubeletContainerName, hash uint64, err error) {
	// For some reason docker appears to be appending '/' to names.
	// If it's there, strip it.
	name = strings.TrimPrefix(name, "/")
	parts := strings.Split(name, "_")
	if len(parts) == 0 || parts[0] != containerNamePrefix {
		err = fmt.Errorf("failed to parse Docker container name %q into parts", name)
		return nil, 0, err
	}
	if len(parts) < 6 {
		// We require at least 6 fields.  We may have more in the future.
		// Anything with fewer fields than this is not something we can
		// manage.
		glog.Warningf("found a container with the %q prefix, but too few fields (%d): %q", containerNamePrefix, len(parts), name)
		err = fmt.Errorf("Docker container name %q has fewer parts than expected %v", name, parts)
		return nil, 0, err
	}

	nameParts := strings.Split(parts[1], ".")
	containerName := nameParts[0]
	if len(nameParts) > 1 {
		hash, err = strconv.ParseUint(nameParts[1], 16, 32)
		if err != nil {
			glog.Warningf("invalid container hash %q in container %q", nameParts[1], name)
		}
	}

	podFullName := parts[2] + "_" + parts[3]
	podUID := types.UID(parts[4])

	return &KubeletContainerName{podFullName, podUID, containerName}, hash, nil
}
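An illustrative, self-contained walk-through of the field layout ParseDockerName expects. The sample name and the "k8s" prefix are assumptions inferred from the parsing code and the fake container names used in the earlier tests; only the standard library is used.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical name in the "<prefix>_<container>.<hash>_<pod>_<namespace>_<uid>_<attempt>" shape.
	name := strings.TrimPrefix("/k8s_foo.1234abcd_qux_new_1234_42", "/")
	parts := strings.Split(name, "_")
	nameParts := strings.Split(parts[1], ".")
	var hash uint64
	if len(nameParts) > 1 {
		hash, _ = strconv.ParseUint(nameParts[1], 16, 32)
	}
	fmt.Printf("container=%s hash=%x podFullName=%s_%s uid=%s\n",
		nameParts[0], hash, parts[2], parts[3], parts[4])
}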
Example #10
func TestBuilderAndCleanerTypeAssert(t *testing.T) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &api.Volume{
		Name: "vol1",
		VolumeSource: api.VolumeSource{
			AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
				VolumeID: "pd",
				FSType:   "ext4",
			},
		},
	}

	builder, err := plug.(*awsElasticBlockStorePlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
	if _, ok := builder.(volume.Cleaner); ok {
		t.Errorf("Volume Builder can be type-assert to Cleaner")
	}

	cleaner, err := plug.(*awsElasticBlockStorePlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
	if _, ok := cleaner.(volume.Builder); ok {
		t.Errorf("Volume Cleaner can be type-assert to Builder")
	}
}
Example #11
func TestPluginLegacy(t *testing.T) {
	plugMgr := volume.PluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeHost("/tmp/fake", nil, nil))

	plug, err := plugMgr.FindPluginByName("gce-pd")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plug.Name() != "gce-pd" {
		t.Errorf("Wrong name: %s", plug.Name())
	}
	if plug.CanSupport(&api.Volume{VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}}}) {
		t.Errorf("Expected false")
	}

	if _, err := plug.NewBuilder(&api.Volume{VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}}}, &api.ObjectReference{UID: types.UID("poduid")}); err == nil {
		t.Errorf("Expected failiure")
	}

	cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"))
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		t.Errorf("Got a nil Cleaner: %v")
	}
}
Example #12
func extractFromFile(filename string) (api.BoundPod, error) {
	var pod api.BoundPod

	glog.V(3).Infof("Reading config file %q", filename)
	file, err := os.Open(filename)
	if err != nil {
		return pod, err
	}
	defer file.Close()

	data, err := ioutil.ReadAll(file)
	if err != nil {
		return pod, err
	}

	manifest := &api.ContainerManifest{}
	// TODO: use api.Scheme.DecodeInto
	if err := yaml.Unmarshal(data, manifest); err != nil {
		return pod, fmt.Errorf("can't unmarshal file %q: %v", filename, err)
	}

	if err := api.Scheme.Convert(manifest, &pod); err != nil {
		return pod, fmt.Errorf("can't convert pod from file %q: %v", filename, err)
	}

	hostname, err := os.Hostname() //TODO: kubelet name would be better
	if err != nil {
		return pod, err
	}

	if len(pod.UID) == 0 {
		hasher := md5.New()
		fmt.Fprintf(hasher, "host:%s", hostname)
		fmt.Fprintf(hasher, "file:%s", filename)
		util.DeepHashObject(hasher, pod)
		pod.UID = types.UID(hex.EncodeToString(hasher.Sum(nil)[0:]))
		glog.V(5).Infof("Generated UID %q for pod %q from file %s", pod.UID, pod.Name, filename)
	}
	if len(pod.Namespace) == 0 {
		hasher := adler32.New()
		fmt.Fprint(hasher, filename)
		// TODO: file-<sum>.hostname would be better, if DNS subdomains
		// are allowed for namespace (some places only allow DNS
		// labels).
		pod.Namespace = fmt.Sprintf("file-%08x-%s", hasher.Sum32(), hostname)
		glog.V(5).Infof("Generated namespace %q for pod %q from file %s", pod.Namespace, pod.Name, filename)
	}
	// TODO(dchen1107): BoundPod is not a runtime.Object. Once the kubelet can work
	// with Pods directly, we can use the SelfLinker defined in the latest package.
	// For now, simply follow the same format as resthandler.go.
	pod.ObjectMeta.SelfLink = fmt.Sprintf("/api/v1beta2/pods/%s?namespace=%s",
		pod.Name, pod.Namespace)

	if glog.V(4) {
		glog.Infof("Got pod from file %q: %#v", filename, pod)
	} else {
		glog.V(1).Infof("Got pod from file %q: %s.%s (%s)", filename, pod.Namespace, pod.Name, pod.UID)
	}
	return pod, nil
}
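A minimal sketch of the fallback namespace derivation used in extractFromFile above, assuming only the standard library; the file name and host name here are made up for illustration.
package main

import (
	"fmt"
	"hash/adler32"
)

func main() {
	filename := "/etc/kubernetes/manifests/example-pod.yaml" // hypothetical path
	hostname := "an-example-host"                            // hypothetical host
	hasher := adler32.New()
	fmt.Fprint(hasher, filename)
	// Same "file-<adler32>-<hostname>" format as extractFromFile uses above.
	fmt.Printf("file-%08x-%s\n", hasher.Sum32(), hostname)
}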
func TestPluginLegacy(t *testing.T) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), newTestHost(t))

	plug, err := plugMgr.FindPluginByName("git")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plug.Name() != "git" {
		t.Errorf("Wrong name: %s", plug.Name())
	}
	if plug.CanSupport(&volume.Spec{Name: "foo", VolumeSource: api.VolumeSource{GitRepo: &api.GitRepoVolumeSource{}}}) {
		t.Errorf("Expected false")
	}

	spec := &api.Volume{VolumeSource: api.VolumeSource{GitRepo: &api.GitRepoVolumeSource{}}}
	if _, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), &api.ObjectReference{UID: types.UID("poduid")}, volume.VolumeOptions{""}); err == nil {
		t.Errorf("Expected failiure")
	}

	cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"))
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		t.Errorf("Got a nil Cleaner")
	}
}
func TestPluginLegacy(t *testing.T) {
	plugMgr := volume.PluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), newTestHost(t))

	plug, err := plugMgr.FindPluginByName("git")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	if plug.Name() != "git" {
		t.Errorf("Wrong name: %s", plug.Name())
	}
	if plug.CanSupport(&api.Volume{Source: &api.VolumeSource{GitRepo: &api.GitRepo{}}}) {
		t.Errorf("Expected false")
	}

	if _, err := plug.NewBuilder(&api.Volume{Source: &api.VolumeSource{GitRepo: &api.GitRepo{}}}, types.UID("poduid")); err == nil {
		t.Errorf("Expected failiure")
	}

	cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"))
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		t.Errorf("Got a nil Cleaner: %v")
	}
}
func TestNamespaceDelete(t *testing.T) {
	client := new(contrail_mocks.ApiClient)
	client.Init()
	allocator := new(mocks.AddressAllocator)
	networkMgr := new(mocks.NetworkManager)
	controller := NewTestController(nil, client, allocator, networkMgr)

	namespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name: "netns",
			UID:  kubetypes.UID(uuid.New()),
		},
	}

	project := new(types.Project)
	project.SetFQName("domain", []string{DefaultDomain, "netns"})
	project.SetUuid(string(namespace.ObjectMeta.UID))
	client.Create(project)

	shutdown := make(chan struct{})
	go controller.Run(shutdown)

	controller.DeleteNamespace(namespace)

	time.Sleep(100 * time.Millisecond)
	type shutdownMsg struct {
	}
	shutdown <- shutdownMsg{}

	_, err := client.FindByUuid("project", string(namespace.ObjectMeta.UID))
	assert.NotNil(t, err)
}
func TestNamespaceAdd(t *testing.T) {
	client := new(contrail_mocks.ApiClient)
	client.Init()
	allocator := new(mocks.AddressAllocator)
	networkMgr := new(mocks.NetworkManager)
	controller := NewTestController(nil, client, allocator, networkMgr)

	namespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name: "netns",
			UID:  kubetypes.UID(uuid.New()),
		},
	}

	shutdown := make(chan struct{})
	go controller.Run(shutdown)

	controller.AddNamespace(namespace)

	time.Sleep(100 * time.Millisecond)
	type shutdownMsg struct {
	}
	shutdown <- shutdownMsg{}

	obj, err := client.FindByUuid("project", string(namespace.ObjectMeta.UID))
	if err != nil {
		t.Fatalf("Namespace %s: Not found", string(namespace.ObjectMeta.UID))
	}
	assert.Equal(t, namespace.Name, obj.GetName())
}
Example #17
func applyDefaults(pod *api.Pod, url string) error {
	if len(pod.UID) == 0 {
		hasher := md5.New()
		fmt.Fprintf(hasher, "url:%s", url)
		util.DeepHashObject(hasher, pod)
		pod.UID = types.UID(hex.EncodeToString(hasher.Sum(nil)[0:]))
		glog.V(5).Infof("Generated UID %q for pod %q from URL %s", pod.UID, pod.Name, url)
	}
	// This is required for backward compatibility, and should be removed once we
	// completely deprecate ContainerManifest.
	var err error
	if len(pod.Name) == 0 {
		pod.Name = string(pod.UID)
	}
	pod.Name, err = GeneratePodName(pod.Name)
	if err != nil {
		return err
	}
	glog.V(5).Infof("Generated Name %q for UID %q from URL %s", pod.Name, pod.UID, url)

	// Always overrides the namespace.
	pod.Namespace = kubelet.NamespaceDefault
	glog.V(5).Infof("Using namespace %q for pod %q from URL %s", pod.Namespace, pod.Name, url)
	return nil
}
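A rough, self-contained sketch of the UID derivation in applyDefaults above. It keeps only the md5-of-URL step and deliberately omits util.DeepHashObject (which also mixes the pod contents into the hash), so the digest shown here will not match what applyDefaults actually produces; the URL is a made-up example.
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func main() {
	url := "http://example.com/manifest.yaml" // hypothetical source URL
	hasher := md5.New()
	fmt.Fprintf(hasher, "url:%s", url)
	// The pod UID is the hex encoding of the digest; the real code also hashes
	// the pod object itself before summing.
	fmt.Println(hex.EncodeToString(hasher.Sum(nil)))
}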
// Run a single container from a pod. Returns the docker container ID
func (dm *DockerManager) RunContainer(pod *api.Pod, container *api.Container, generator kubecontainer.RunContainerOptionsGenerator, runner kubecontainer.HandlerRunner, netMode, ipcMode string) (DockerID, error) {
	ref, err := kubecontainer.GenerateContainerRef(pod, container)
	if err != nil {
		glog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err)
	}

	opts, err := generator.GenerateRunContainerOptions(pod, container, netMode, ipcMode)
	if err != nil {
		return "", err
	}

	id, err := dm.runContainerRecordErrorReason(pod, container, opts, ref)
	if err != nil {
		return "", err
	}

	// Remember this reference so we can report events about this container
	if ref != nil {
		dm.containerRefManager.SetRef(id, ref)
	}

	if container.Lifecycle != nil && container.Lifecycle.PostStart != nil {
		handlerErr := runner.Run(id, pod, container, container.Lifecycle.PostStart)
		if handlerErr != nil {
			dm.KillContainer(types.UID(id))
			return DockerID(""), fmt.Errorf("failed to call event handler: %v", handlerErr)
		}
	}
	return DockerID(id), err
}
Example #19
func TestName(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid")
		testVolumeName = "test_name"
		testNamespace  = "test_metadata_namespace"
		testName       = "test_metadata_name"
	)

	volumeSpec := &api.Volume{
		Name: testVolumeName,
		VolumeSource: api.VolumeSource{
			Metadata: &api.MetadataVolumeSource{
				Items: []api.MetadataFile{
					{Name: "name_file_name", FieldRef: api.ObjectFieldSelector{
						FieldPath: "metadata.name"}}}},
		},
	}

	fake := testclient.NewSimpleFake(&api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      testName,
			Namespace: testNamespace,
		},
	})

	pluginMgr := volume.VolumePluginMgr{}
	pluginMgr.InitPlugins(ProbeVolumePlugins(), newTestHost(t, fake))
	plugin, err := pluginMgr.FindPluginByName(metadataPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Name: testName}}
	builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder")
	}

	volumePath := builder.GetPath()

	err = builder.SetUp()
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}

	var data []byte
	data, err = ioutil.ReadFile(path.Join(volumePath, "name_file_name"))
	if err != nil {
		t.Error(err)
	}

	if string(data) != testName {
		t.Errorf("Found `%s` expected %s", string(data), testName)
	}

	CleanEverything(plugin, testVolumeName, volumePath, testPodUID, t)

}
Example #20
func (d *NodeDescriber) Describe(namespace, name string) (string, error) {
	mc := d.Nodes()
	node, err := mc.Get(name)
	if err != nil {
		return "", err
	}

	var pods []*api.Pod
	allPods, err := d.Pods(namespace).List(labels.Everything(), fields.Everything())
	if err != nil {
		return "", err
	}
	for i := range allPods.Items {
		pod := &allPods.Items[i]
		if pod.Spec.NodeName != name {
			continue
		}
		pods = append(pods, pod)
	}

	var events *api.EventList
	if ref, err := api.GetReference(node); err != nil {
		glog.Errorf("Unable to construct reference to '%#v': %v", node, err)
	} else {
		// TODO: We haven't decided the namespace for Node object yet.
		ref.UID = types.UID(ref.Name)
		events, _ = d.Events("").Search(ref)
	}

	return describeNode(node, pods, events)
}
func TestPluginBackCompat(t *testing.T) {
	plugMgr := volume.PluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), &volume.FakeHost{"/tmp/fake"})

	plug, err := plugMgr.FindPluginByName("kubernetes.io/empty-dir")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	spec := &api.Volume{
		Name:   "vol1",
		Source: nil,
	}
	builder, err := plug.NewBuilder(spec, types.UID("poduid"))
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder: %v")
	}

	path := builder.GetPath()
	if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~empty-dir/vol1" {
		t.Errorf("Got unexpected path: %s", path)
	}
}
Example #22
func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName string) error {
	if len(pod.UID) == 0 {
		hasher := md5.New()
		if isFile {
			fmt.Fprintf(hasher, "host:%s", nodeName)
			fmt.Fprintf(hasher, "file:%s", source)
		} else {
			fmt.Fprintf(hasher, "url:%s", source)
		}
		util.DeepHashObject(hasher, pod)
		pod.UID = types.UID(hex.EncodeToString(hasher.Sum(nil)[0:]))
		glog.V(5).Infof("Generated UID %q pod %q from %s", pod.UID, pod.Name, source)
	}

	// This is required for backward compatibility, and should be removed once we
	// completely deprecate ContainerManifest.
	if len(pod.Name) == 0 {
		pod.Name = string(pod.UID)
	}
	pod.Name = generatePodName(pod.Name, nodeName)
	glog.V(5).Infof("Generated Name %q for UID %q from URL %s", pod.Name, pod.UID, source)

	if pod.Namespace == "" {
		pod.Namespace = kubelet.NamespaceDefault
	}
	glog.V(5).Infof("Using namespace %q for pod %q from %s", pod.Namespace, pod.Name, source)

	// Set the Host field to indicate this pod is scheduled on the current node.
	pod.Spec.NodeName = nodeName

	pod.ObjectMeta.SelfLink = getSelfLink(pod.Name, pod.Namespace)
	return nil
}
func TestUpdatePod(t *testing.T) {
	podWorkers, processed := createPodWorkers()

	// Check whether all pod updates will be processed.
	numPods := 20
	for i := 0; i < numPods; i++ {
		for j := i; j < numPods; j++ {
			podWorkers.UpdatePod(newPod(string(j), string(i)), nil, func() {})
		}
	}
	drainWorkers(podWorkers, numPods)

	if len(processed) != 20 {
		t.Errorf("Not all pods processed: %v", len(processed))
		return
	}
	for i := 0; i < numPods; i++ {
		uid := types.UID(i)
		if len(processed[uid]) < 1 || len(processed[uid]) > i+1 {
			t.Errorf("Pod %v processed %v times", i, len(processed[uid]))
			continue
		}

		first := 0
		last := len(processed[uid]) - 1
		if processed[uid][first] != string(0) {
			t.Errorf("Pod %v: incorrect order %v, %v", i, first, processed[uid][first])

		}
		if processed[uid][last] != string(i) {
			t.Errorf("Pod %v: incorrect order %v, %v", i, last, processed[uid][last])
		}
	}
}
func TestPodUsesNonExistingService(t *testing.T) {
	kube := mocks.NewKubeClient()

	client := new(contrail_mocks.ApiClient)
	client.Init()

	client.AddInterceptor("virtual-machine-interface", &VmiInterceptor{})
	client.AddInterceptor("virtual-network", &NetworkInterceptor{})

	allocator := new(mocks.AddressAllocator)

	controller := NewTestController(kube, client, allocator, nil)

	pod1 := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      "test-xz1",
			Namespace: "testns",
			UID:       kubetypes.UID(uuid.New()),
			Labels: map[string]string{
				"name": "testpod",
				"uses": "nonexisting",
			},
		},
	}

	netnsProject := new(types.Project)
	netnsProject.SetFQName("", []string{"default-domain", "testns"})
	client.Create(netnsProject)

	allocator.On("LocateIpAddress", string(pod1.ObjectMeta.UID)).Return("10.0.0.1", nil)
	allocator.On("ReleaseIpAddress", string(pod1.ObjectMeta.UID)).Return()
	kube.PodInterface.On("Update", pod1).Return(pod1, nil)

	shutdown := make(chan struct{})
	go controller.Run(shutdown)

	controller.AddPod(pod1)
	time.Sleep(100 * time.Millisecond)

	_, err := types.NetworkPolicyByName(client, "default-domain:testns:nonexisting")
	assert.NoError(t, err)

	_, err = types.VirtualNetworkByName(client, "default-domain:testns:testpod")
	assert.NoError(t, err)

	controller.DeletePod(pod1)
	time.Sleep(100 * time.Millisecond)
	type shutdownMsg struct {
	}
	shutdown <- shutdownMsg{}

	_, err = types.VirtualNetworkByName(client, "default-domain:testns:testpod")
	assert.Error(t, err)

	_, err = types.NetworkPolicyByName(client, "default-domain:testns:nonexisting")
	assert.Error(t, err)

	allocator.AssertExpectations(t)
}
func newPod(uid, name string) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:  types.UID(uid),
			Name: name,
		},
	}
}
Example #26
func ExampleManifestAndPod(id string) (api.ContainerManifest, api.BoundPod) {
	manifest := api.ContainerManifest{
		ID:   id,
		UUID: types.UID(id),
		Containers: []api.Container{
			{
				Name:  "c" + id,
				Image: "foo",
				TerminationMessagePath: "/somepath",
			},
		},
		Volumes: []api.Volume{
			{
				Name: "host-dir",
				Source: &api.VolumeSource{
					HostDir: &api.HostDir{"/dir/path"},
				},
			},
		},
	}
	expectedPod := api.BoundPod{
		ObjectMeta: api.ObjectMeta{
			Name: id,
			UID:  types.UID(id),
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "c" + id,
					Image: "foo",
					TerminationMessagePath: "/somepath",
				},
			},
			Volumes: []api.Volume{
				{
					Name: "host-dir",
					Source: &api.VolumeSource{
						HostDir: &api.HostDir{"/dir/path"},
					},
				},
			},
		},
	}
	return manifest, expectedPod
}
Example #27
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: "pvA",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{
				Glusterfs: &api.GlusterfsVolumeSource{"ep", "vol", false},
			},
			ClaimRef: &api.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: api.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: api.PersistentVolumeClaimStatus{
			Phase: api.ClaimBound,
		},
	}

	ep := &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
			Name: "ep",
		},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{"foo", 80, api.ProtocolTCP}},
		}},
	}

	o := testclient.NewObjects(api.Scheme, api.Scheme)
	o.Add(pv)
	o.Add(claim)
	o.Add(ep)
	client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)}

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
	plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)

	// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, nil)

	if !builder.IsReadOnly() {
		t.Errorf("Expected true for builder.IsReadOnly")
	}
}
Example #28
// serveStats implements stats logic.
func (s *Server) serveStats(w http.ResponseWriter, req *http.Request) {
	// /stats/<podfullname>/<containerName> or /stats/<namespace>/<podfullname>/<uid>/<containerName>
	components := strings.Split(strings.TrimPrefix(path.Clean(req.URL.Path), "/"), "/")
	var stats *info.ContainerInfo
	var err error
	var query info.ContainerInfoRequest
	err = json.NewDecoder(req.Body).Decode(&query)
	if err != nil && err != io.EOF {
		s.error(w, err)
		return
	}
	switch len(components) {
	case 1:
		// Machine stats
		stats, err = s.host.GetRootInfo(&query)
	case 2:
		// pod stats
		// TODO(monnand) Implement this
		errors.New("pod level status currently unimplemented")
	case 3:
		// Backward compatibility without uid information, does not support namespace
		pod, ok := s.host.GetPodByName(api.NamespaceDefault, components[1])
		if !ok {
			http.Error(w, "Pod does not exist", http.StatusNotFound)
			return
		}
		stats, err = s.host.GetContainerInfo(GetPodFullName(pod), "", components[2], &query)
	case 5:
		pod, ok := s.host.GetPodByName(components[1], components[2])
		if !ok {
			http.Error(w, "Pod does not exist", http.StatusNotFound)
			return
		}
		stats, err = s.host.GetContainerInfo(GetPodFullName(pod), types.UID(components[3]), components[4], &query)
	default:
		http.Error(w, "unknown resource.", http.StatusNotFound)
		return
	}
	if err != nil {
		s.error(w, err)
		return
	}
	if stats == nil {
		w.WriteHeader(http.StatusOK)
		fmt.Fprint(w, "{}")
		return
	}
	data, err := json.Marshal(stats)
	if err != nil {
		s.error(w, err)
		return
	}
	w.Header().Add("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(data)
	return
}
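A small standalone sketch of the path dispatch used by serveStats, assuming only the standard library; it shows how the component count selects a branch and leaves out the actual stats lookups (and the unimplemented two-component pod case). The sample paths are made up.
package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	for _, p := range []string{
		"/stats",
		"/stats/mypod/mycontainer",
		"/stats/myns/mypod/poduid/mycontainer",
	} {
		components := strings.Split(strings.TrimPrefix(path.Clean(p), "/"), "/")
		switch len(components) {
		case 1:
			fmt.Println(p, "-> machine stats")
		case 3:
			fmt.Println(p, "-> legacy pod/container stats")
		case 5:
			fmt.Println(p, "-> namespaced pod/uid/container stats")
		default:
			fmt.Println(p, "-> unknown resource")
		}
	}
}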
Example #29
// NewUUID returns a new types.UID.
//
// The underlying UUID package is naive and can generate identical UUIDs if called in
// quick enough succession. It works in 100 ns increments, so we block subsequent calls
// for 200 ns to be safe. The block happens in a goroutine so that the caller doesn't
// have to wait.
// TODO: save old unused UUIDs so that no one has to block.
func NewUUID() types.UID {
	uuidLock.Lock()
	result := uuid.NewUUID()
	go func() {
		time.Sleep(200 * time.Nanosecond)
		uuidLock.Unlock()
	}()
	return types.UID(result.String())
}
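A minimal sketch of the same lock-and-delay pattern, with a timestamp standing in for the UUID so the example has no external dependency; the 200 ns figure comes from the comment above.
package main

import (
	"fmt"
	"sync"
	"time"
)

var idLock sync.Mutex

func newID() string {
	idLock.Lock()
	id := fmt.Sprintf("%d", time.Now().UnixNano()) // stand-in for uuid.NewUUID()
	go func() {
		// Hold the lock briefly so the next caller can't observe the same clock value.
		time.Sleep(200 * time.Nanosecond)
		idLock.Unlock()
	}()
	return id
}

func main() {
	fmt.Println(newID(), newID())
}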
Example #30
// Test the case where the 'ready' file has been created and the pod volume dir
// is a mountpoint.  Mount should not be called.
func TestPluginIdempotent(t *testing.T) {
	var (
		testPodUID     = types.UID("test_pod_uid2")
		testVolumeName = "test_volume_name"
		testNamespace  = "test_secret_namespace"
		testName       = "test_secret_name"

		volumeSpec    = volumeSpec(testVolumeName, testName)
		secret        = secret(testNamespace, testName)
		client        = testclient.NewSimpleFake(&secret)
		pluginMgr     = volume.VolumePluginMgr{}
		rootDir, host = newTestHost(t, client)
	)

	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)

	plugin, err := pluginMgr.FindPluginByName(secretPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}

	podVolumeDir := fmt.Sprintf("%v/pods/test_pod_uid2/volumes/kubernetes.io~secret/test_volume_name", rootDir)
	podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid2/plugins/kubernetes.io~secret/test_volume_name", rootDir)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID}}
	mounter := &mount.FakeMounter{}
	mounter.MountPoints = []mount.MountPoint{
		{
			Path: podVolumeDir,
		},
	}
	util.SetReady(podMetadataDir)
	builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}, mounter)
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Errorf("Got a nil Builder")
	}

	volumePath := builder.GetPath()
	err = builder.SetUp()
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}

	if len(mounter.Log) != 0 {
		t.Errorf("Unexpected calls made to mounter: %v", mounter.Log)
	}

	if _, err := os.Stat(volumePath); err != nil {
		if !os.IsNotExist(err) {
			t.Errorf("SetUp() failed unexpectedly: %v", err)
		}
	} else {
		t.Errorf("volume path should not exist: %v", volumePath)
	}
}