Example 1
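An updatePodNetwork variant: it refreshes the OpenFlow (OF) rules for the namespace's local pods, then moves each service's rules from the old network ID to the new one, aborting on the first error.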
func (oc *OvsController) updatePodNetwork(namespace string, netID, oldNetID uint) error {
	// Update OF rules for the existing/old pods in the namespace
	pods, err := oc.GetLocalPods(namespace)
	if err != nil {
		return err
	}
	for _, pod := range pods {
		err := oc.pluginHooks.UpdatePod(pod.Namespace, pod.Name, kubetypes.DockerID(pod.ContainerID))
		if err != nil {
			return err
		}
	}

	// Update OF rules for the old services in the namespace
	services, err := oc.Registry.GetServicesForNamespace(namespace)
	if err != nil {
		return err
	}
	for _, svc := range services {
		for _, port := range svc.Ports {
			oc.flowController.DelServiceOFRules(oldNetID, svc.IP, port.Protocol, port.Port)
			oc.flowController.AddServiceOFRules(netID, svc.IP, port.Protocol, port.Port)
		}
	}
	return nil
}
Example 2
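A later variant of the same routine: pod updates still abort on the first error, but service-rule failures are collected and returned together via kerrors.NewAggregate; deletion failures are only logged.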
func (node *OsdnNode) updatePodNetwork(namespace string, netID uint32) error {
	// Update OF rules for the existing/old pods in the namespace
	pods, err := node.GetLocalPods(namespace)
	if err != nil {
		return err
	}
	for _, pod := range pods {
		err = node.UpdatePod(pod.Namespace, pod.Name, kubetypes.DockerID(getPodContainerID(&pod)))
		if err != nil {
			return err
		}
	}

	// Update OF rules for the old services in the namespace
	services, err := node.registry.GetServicesForNamespace(namespace)
	if err != nil {
		return err
	}
	errList := []error{}
	for _, svc := range services {
		if err = node.DeleteServiceRules(&svc); err != nil {
			log.Error(err)
		}
		if err = node.AddServiceRules(&svc, netID); err != nil {
			errList = append(errList, err)
		}
	}
	return kerrors.NewAggregate(errList)
}
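Examples 2 and 8 funnel per-service failures into kerrors.NewAggregate, which returns nil for an empty list, so the collected slice can be returned unconditionally. A minimal self-contained sketch of that behavior (not the real k8s.io util/errors package):

package main

import (
	"fmt"
	"strings"
)

// aggregate mimics kerrors.NewAggregate: nil for an empty list,
// otherwise a single error joining all collected messages.
func aggregate(errs []error) error {
	if len(errs) == 0 {
		return nil
	}
	msgs := make([]string, len(errs))
	for i, e := range errs {
		msgs[i] = e.Error()
	}
	return fmt.Errorf("[%s]", strings.Join(msgs, ", "))
}

func main() {
	errList := []error{fmt.Errorf("update service rules: timeout")}
	fmt.Println(aggregate(errList)) // [update service rules: timeout]
	fmt.Println(aggregate(nil))     // <nil>, safe to return directly
}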
Example 3
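Node startup for the multitenant-aware OVS plugin: it brings up the subnet (and, in multitenant mode, the VNID watcher), then refreshes every local pod if the underlying network configuration changed, logging rather than aborting on per-pod failures.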
func (plugin *ovsPlugin) PluginStartNode(mtu uint) error {
	networkChanged, err := plugin.SubnetStartNode(mtu)
	if err != nil {
		return err
	}

	if plugin.multitenant {
		if err := plugin.VnidStartNode(); err != nil {
			return err
		}
	}

	if networkChanged {
		pods, err := plugin.GetLocalPods(kapi.NamespaceAll)
		if err != nil {
			return err
		}
		for _, p := range pods {
			containerID := osdn.GetPodContainerID(&p)
			err = plugin.UpdatePod(p.Namespace, p.Name, kubeletTypes.DockerID(containerID))
			if err != nil {
				glog.Warningf("Could not update pod %q (%s): %s", p.Name, containerID, err)
			}
		}
	}

	return nil
}
Example 4
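Resolves a docker container ID to its network namespace path, failing explicitly when the kubelet is not running the docker runtime.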
func (m *podManager) getContainerNetnsPath(id string) (string, error) {
	runtime, ok := m.host.GetRuntime().(*dockertools.DockerManager)
	if !ok {
		return "", fmt.Errorf("openshift-sdn execution called on non-docker runtime")
	}
	return runtime.GetNetNS(kcontainer.DockerID(id).ContainerID())
}
Example 5
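A kubelet test that builds the nsinit exec command for a container and verifies both its working directory and its argument list.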
func TestDockerContainerCommand(t *testing.T) {
	runner := &DockerManager{}
	containerID := kubecontainer.DockerID("1234").ContainerID()
	command := []string{"ls"}
	cmd, _ := runner.getRunInContainerCommand(containerID, command)
	if cmd.Dir != "/var/lib/docker/execdriver/native/"+containerID.ID {
		t.Errorf("unexpected command CWD: %s", cmd.Dir)
	}
	if !reflect.DeepEqual(cmd.Args, []string{"/usr/sbin/nsinit", "exec", "ls"}) {
		t.Errorf("unexpected command args: %s", cmd.Args)
	}
}
Example 6
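Full OsdnNode startup: fetch cluster network info, install iptables rules, set up the SDN, start the subnet watcher (plus VNID and egress network policy in multitenant mode), refresh local pods if the network changed, and finally mark the pod network ready.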
func (node *OsdnNode) Start() error {
	var err error
	node.networkInfo, err = getNetworkInfo(node.osClient)
	if err != nil {
		return fmt.Errorf("Failed to get network information: %v", err)
	}

	nodeIPTables := newNodeIPTables(node.networkInfo.ClusterNetwork.String(), node.iptablesSyncPeriod)
	if err = nodeIPTables.Setup(); err != nil {
		return fmt.Errorf("Failed to set up iptables: %v", err)
	}

	networkChanged, err := node.SetupSDN()
	if err != nil {
		return err
	}

	err = node.SubnetStartNode()
	if err != nil {
		return err
	}

	if node.multitenant {
		if err = node.VnidStartNode(); err != nil {
			return err
		}
		if err = node.SetupEgressNetworkPolicy(); err != nil {
			return err
		}
	}

	if networkChanged {
		var pods []kapi.Pod
		pods, err = node.GetLocalPods(kapi.NamespaceAll)
		if err != nil {
			return err
		}
		for _, p := range pods {
			containerID := getPodContainerID(&p)
			err = node.UpdatePod(p.Namespace, p.Name, kubeletTypes.DockerID(containerID))
			if err != nil {
				log.Warningf("Could not update pod %q (%s): %s", p.Name, containerID, err)
			}
		}
	}

	node.markPodNetworkReady()

	return nil
}
Example 7
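Subnet startup on the older OvsController: it configures the flow controller with the cluster and services CIDRs, installs OF rules for every known host subnet, and refreshes running pods when the network setup changed.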
func (oc *OvsController) SubnetStartNode(mtu uint) error {
	err := oc.initSelfSubnet()
	if err != nil {
		return err
	}

	// Assume we are working with IPv4
	clusterNetworkCIDR, err := oc.Registry.GetClusterNetworkCIDR()
	if err != nil {
		log.Errorf("Failed to obtain ClusterNetwork: %v", err)
		return err
	}
	servicesNetworkCIDR, err := oc.Registry.GetServicesNetworkCIDR()
	if err != nil {
		log.Errorf("Failed to obtain ServicesNetwork: %v", err)
		return err
	}
	networkChanged, err := oc.flowController.Setup(oc.localSubnet.SubnetCIDR, clusterNetworkCIDR, servicesNetworkCIDR, mtu)
	if err != nil {
		return err
	}

	getSubnets := func(registry *Registry) (interface{}, string, error) {
		return registry.GetSubnets()
	}
	result, err := oc.watchAndGetResource("HostSubnet", watchSubnets, getSubnets)
	if err != nil {
		return err
	}
	subnets := result.([]api.Subnet)
	for _, s := range subnets {
		oc.flowController.AddOFRules(s.NodeIP, s.SubnetCIDR, oc.localIP)
	}

	if networkChanged {
		pods, err := oc.Registry.GetRunningPods(oc.hostName, kapi.NamespaceAll)
		if err != nil {
			return err
		}
		for _, p := range pods {
			err = oc.pluginHooks.UpdatePod(p.Namespace, p.Name, kubetypes.DockerID(p.ContainerID))
			if err != nil {
				log.Warningf("Could not update pod %q (%s): %s", p.Name, p.ContainerID, err)
			}
		}
	}

	return nil
}
Example 8
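The most complete updatePodNetwork variant: pods and services are fetched up front, all failures are aggregated, and egress firewall rules referencing the namespace's old VNID are rewritten too. The FIXME notes that the switch-over is not yet transactional.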
func (node *OsdnNode) updatePodNetwork(namespace string, oldNetID, netID uint32) error {
	// FIXME: this is racy; traffic coming from the pods gets switched to the new
	// VNID before the service and firewall rules are updated to match. We need
	// to do the updates as a single transaction (ovs-ofctl --bundle).

	pods, err := node.GetLocalPods(namespace)
	if err != nil {
		return err
	}
	services, err := node.kClient.Services(namespace).List(kapi.ListOptions{})
	if err != nil {
		return err
	}

	errList := []error{}

	// Update OF rules for the existing/old pods in the namespace
	for _, pod := range pods {
		err = node.UpdatePod(pod.Namespace, pod.Name, kubetypes.DockerID(getPodContainerID(&pod)))
		if err != nil {
			errList = append(errList, err)
		}
	}

	// Update OF rules for the old services in the namespace
	for _, svc := range services.Items {
		if !kapi.IsServiceIPSet(&svc) {
			continue
		}

		if err = node.DeleteServiceRules(&svc); err != nil {
			log.Error(err)
		}
		if err = node.AddServiceRules(&svc, netID); err != nil {
			errList = append(errList, err)
		}
	}

	// Update namespace references in egress firewall rules
	if err = node.UpdateEgressNetworkPolicyVNID(namespace, oldNetID, netID); err != nil {
		errList = append(errList, err)
	}

	return kerrors.NewAggregate(errList)
}
Example 9
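A kubelet sync test: a container that the liveness manager has marked as failed must be stopped and recreated, while the pod infra container is left untouched.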
func TestSyncPodsUnhealthy(t *testing.T) {
	const (
		unhealthyContainerID = "1234"
		infraContainerID     = "9876"
	)
	dm, fakeDocker := newTestDockerManager()
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: api.PodSpec{
			Containers: []api.Container{{Name: "unhealthy"}},
		},
	}

	fakeDocker.SetFakeRunningContainers([]*docker.Container{
		{
			ID:   unhealthyContainerID,
			Name: "/k8s_unhealthy_foo_new_12345678_42",
		},
		{
			ID:   infraContainerID,
			Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_42",
		}})
	dm.livenessManager.Set(kubecontainer.DockerID(unhealthyContainerID).ContainerID(), proberesults.Failure, pod)

	runSyncPod(t, dm, fakeDocker, pod, nil, false)

	verifyCalls(t, fakeDocker, []string{
		// Check the pod infra container.
		"inspect_container",
		// Kill the unhealthy container.
		"stop",
		// Restart the unhealthy container.
		"create", "start", "inspect_container",
	})

	if err := fakeDocker.AssertStopped([]string{unhealthyContainerID}); err != nil {
		t.Errorf("%v", err)
	}
}
Example 10
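Converts a docker API container record into the kubelet's runtime-agnostic Container type, wrapping the raw docker ID via DockerID(...).ContainerID().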
// toRuntimeContainer converts dockertypes.Container to kubecontainer.Container.
func toRuntimeContainer(c *dockertypes.Container) (*kubecontainer.Container, error) {
	if c == nil {
		return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container")
	}

	dockerName, hash, err := getDockerContainerNameInfo(c)
	if err != nil {
		return nil, err
	}

	return &kubecontainer.Container{
		ID:    kubecontainer.DockerID(c.ID).ContainerID(),
		Name:  dockerName.ContainerName,
		Image: c.Image,
		Hash:  hash,
		// (random-liu) docker uses status to indicate whether a container is running or exited,
		// whereas kubernetes uses state for that and reserves status for the comprehensive
		// status of the container, hence the difference in naming here.
		State: mapState(c.Status),
	}, nil
}
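Every example here converts a raw docker ID with DockerID(...).ContainerID(). A minimal sketch of what that conversion amounts to; the real types live in the kubelet's container packages, and the exact field names here are assumptions:

package main

import "fmt"

// ContainerID pairs a runtime name with a runtime-specific ID,
// mirroring the kubelet's kubecontainer.ContainerID.
type ContainerID struct {
	Type string // e.g. "docker"
	ID   string // runtime-specific identifier
}

// DockerID is a raw docker container ID; converting it tags the
// ID with the "docker" runtime type.
type DockerID string

func (id DockerID) ContainerID() ContainerID {
	return ContainerID{Type: "docker", ID: string(id)}
}

func main() {
	cid := DockerID("1234").ContainerID()
	fmt.Printf("%s://%s\n", cid.Type, cid.ID) // docker://1234
}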
Example 11
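An intermediate updatePodNetwork variant: pod updates abort on the first error, and errors from the service-rule plugin hooks are dropped entirely.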
func (oc *OsdnController) updatePodNetwork(namespace string, netID uint) error {
	// Update OF rules for the existing/old pods in the namespace
	pods, err := oc.GetLocalPods(namespace)
	if err != nil {
		return err
	}
	for _, pod := range pods {
		err := oc.pluginHooks.UpdatePod(pod.Namespace, pod.Name, kubetypes.DockerID(GetPodContainerID(&pod)))
		if err != nil {
			return err
		}
	}

	// Update OF rules for the old services in the namespace
	services, err := oc.Registry.GetServicesForNamespace(namespace)
	if err != nil {
		return err
	}
	for _, svc := range services {
		oc.pluginHooks.DeleteServiceRules(&svc)
		oc.pluginHooks.AddServiceRules(&svc, netID)
	}
	return nil
}
Example 12
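Two kubelet dockertools tests: the first verifies that GetPods groups running (and, when requested, exited) containers into the correct pods; the second verifies that pod status reflects the IP or the error reported by the network plugin.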
func TestFindContainersByPod(t *testing.T) {
	tests := []struct {
		containerList       []docker.APIContainers
		exitedContainerList []docker.APIContainers
		all                 bool
		expectedPods        []*kubecontainer.Pod
	}{

		{
			[]docker.APIContainers{
				{
					ID:    "foobar",
					Names: []string{"/k8s_foobar.1234_qux_ns_1234_42"},
				},
				{
					ID:    "barbar",
					Names: []string{"/k8s_barbar.1234_qux_ns_2343_42"},
				},
				{
					ID:    "baz",
					Names: []string{"/k8s_baz.1234_qux_ns_1234_42"},
				},
			},
			[]docker.APIContainers{
				{
					ID:    "barfoo",
					Names: []string{"/k8s_barfoo.1234_qux_ns_1234_42"},
				},
				{
					ID:    "bazbaz",
					Names: []string{"/k8s_bazbaz.1234_qux_ns_5678_42"},
				},
			},
			false,
			[]*kubecontainer.Pod{
				{
					ID:        "1234",
					Name:      "qux",
					Namespace: "ns",
					Containers: []*kubecontainer.Container{
						{
							ID:    kubecontainer.DockerID("foobar").ContainerID(),
							Name:  "foobar",
							Hash:  0x1234,
							State: kubecontainer.ContainerStateUnknown,
						},
						{
							ID:    kubecontainer.DockerID("baz").ContainerID(),
							Name:  "baz",
							Hash:  0x1234,
							State: kubecontainer.ContainerStateUnknown,
						},
					},
				},
				{
					ID:        "2343",
					Name:      "qux",
					Namespace: "ns",
					Containers: []*kubecontainer.Container{
						{
							ID:    kubecontainer.DockerID("barbar").ContainerID(),
							Name:  "barbar",
							Hash:  0x1234,
							State: kubecontainer.ContainerStateUnknown,
						},
					},
				},
			},
		},
		{
			[]docker.APIContainers{
				{
					ID:    "foobar",
					Names: []string{"/k8s_foobar.1234_qux_ns_1234_42"},
				},
				{
					ID:    "barbar",
					Names: []string{"/k8s_barbar.1234_qux_ns_2343_42"},
				},
				{
					ID:    "baz",
					Names: []string{"/k8s_baz.1234_qux_ns_1234_42"},
				},
			},
			[]docker.APIContainers{
				{
					ID:    "barfoo",
					Names: []string{"/k8s_barfoo.1234_qux_ns_1234_42"},
				},
				{
					ID:    "bazbaz",
					Names: []string{"/k8s_bazbaz.1234_qux_ns_5678_42"},
				},
			},
			true,
			[]*kubecontainer.Pod{
				{
					ID:        "1234",
					Name:      "qux",
					Namespace: "ns",
					Containers: []*kubecontainer.Container{
						{
							ID:    kubecontainer.DockerID("foobar").ContainerID(),
							Name:  "foobar",
							Hash:  0x1234,
							State: kubecontainer.ContainerStateUnknown,
						},
						{
							ID:    kubecontainer.DockerID("barfoo").ContainerID(),
							Name:  "barfoo",
							Hash:  0x1234,
							State: kubecontainer.ContainerStateUnknown,
						},
						{
							ID:    kubecontainer.DockerID("baz").ContainerID(),
							Name:  "baz",
							Hash:  0x1234,
							State: kubecontainer.ContainerStateUnknown,
						},
					},
				},
				{
					ID:        "2343",
					Name:      "qux",
					Namespace: "ns",
					Containers: []*kubecontainer.Container{
						{
							ID:    kubecontainer.DockerID("barbar").ContainerID(),
							Name:  "barbar",
							Hash:  0x1234,
							State: kubecontainer.ContainerStateUnknown,
						},
					},
				},
				{
					ID:        "5678",
					Name:      "qux",
					Namespace: "ns",
					Containers: []*kubecontainer.Container{
						{
							ID:    kubecontainer.DockerID("bazbaz").ContainerID(),
							Name:  "bazbaz",
							Hash:  0x1234,
							State: kubecontainer.ContainerStateUnknown,
						},
					},
				},
			},
		},
		{
			[]docker.APIContainers{},
			[]docker.APIContainers{},
			true,
			nil,
		},
	}
	fakeClient := &FakeDockerClient{}
	np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
	// image back-off is set to nil; this test shouldn't pull images
	containerManager := NewFakeDockerManager(fakeClient, &record.FakeRecorder{}, nil, nil, &cadvisorapi.MachineInfo{}, kubetypes.PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil, nil)
	for i, test := range tests {
		fakeClient.ContainerList = test.containerList
		fakeClient.ExitedContainerList = test.exitedContainerList

		result, _ := containerManager.GetPods(test.all)
		for i := range result {
			sort.Sort(containersByID(result[i].Containers))
		}
		for i := range test.expectedPods {
			sort.Sort(containersByID(test.expectedPods[i].Containers))
		}
		sort.Sort(podsByID(result))
		sort.Sort(podsByID(test.expectedPods))
		if !reflect.DeepEqual(test.expectedPods, result) {
			t.Errorf("%d: expected: %#v, saw: %#v", i, test.expectedPods, result)
		}
	}
}

func TestGetPodStatusFromNetworkPlugin(t *testing.T) {
	cases := []struct {
		pod                *v1.Pod
		fakePodIP          string
		containerID        string
		infraContainerID   string
		networkStatusError error
		expectRunning      bool
		expectUnknown      bool
	}{
		{
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					UID:       "12345678",
					Name:      "foo",
					Namespace: "new",
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{Name: "container"}},
				},
			},
			fakePodIP:          "10.10.10.10",
			containerID:        "123",
			infraContainerID:   "9876",
			networkStatusError: nil,
			expectRunning:      true,
			expectUnknown:      false,
		},
		{
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					UID:       "12345678",
					Name:      "foo",
					Namespace: "new",
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{Name: "container"}},
				},
			},
			fakePodIP:          "",
			containerID:        "123",
			infraContainerID:   "9876",
			networkStatusError: fmt.Errorf("CNI plugin error"),
			expectRunning:      false,
			expectUnknown:      true,
		},
	}
	for _, test := range cases {
		dm, fakeDocker := newTestDockerManager()
		ctrl := gomock.NewController(t)
		fnp := mock_network.NewMockNetworkPlugin(ctrl)
		dm.networkPlugin = fnp

		fakeDocker.SetFakeRunningContainers([]*FakeContainer{
			{
				ID:      test.containerID,
				Name:    fmt.Sprintf("/k8s_container_%s_%s_%s_42", test.pod.Name, test.pod.Namespace, test.pod.UID),
				Running: true,
			},
			{
				ID:      test.infraContainerID,
				Name:    fmt.Sprintf("/k8s_POD.%s_%s_%s_%s_42", strconv.FormatUint(generatePodInfraContainerHash(test.pod), 16), test.pod.Name, test.pod.Namespace, test.pod.UID),
				Running: true,
			},
		})

		fnp.EXPECT().Name().Return("someNetworkPlugin").AnyTimes()
		var podNetworkStatus *network.PodNetworkStatus
		if test.fakePodIP != "" {
			podNetworkStatus = &network.PodNetworkStatus{IP: net.ParseIP(test.fakePodIP)}
		}
		fnp.EXPECT().GetPodNetworkStatus(test.pod.Namespace, test.pod.Name, kubecontainer.DockerID(test.infraContainerID).ContainerID()).Return(podNetworkStatus, test.networkStatusError)

		podStatus, err := dm.GetPodStatus(test.pod.UID, test.pod.Name, test.pod.Namespace)
		if err != nil {
			t.Fatal(err)
		}
		if podStatus.IP != test.fakePodIP {
			t.Errorf("Got wrong ip, expected %v, got %v", test.fakePodIP, podStatus.IP)
		}

		expectedStatesCount := 0
		var expectedState kubecontainer.ContainerState
		if test.expectRunning {
			expectedState = kubecontainer.ContainerStateRunning
		} else if test.expectUnknown {
			expectedState = kubecontainer.ContainerStateUnknown
		} else {
			t.Errorf("Some state has to be expected")
		}
		for _, containerStatus := range podStatus.ContainerStatuses {
			if containerStatus.State == expectedState {
				expectedStatesCount++
			}
		}
		if expectedStatesCount < 1 {
			t.Errorf("Invalid count of containers with expected state")
		}
	}
}