Example #1
func BuildKubernetesNodeConfig(options configapi.NodeConfig, enableProxy, enableDNS bool) (*NodeConfig, error) {
	originClient, osClientConfig, err := configapi.GetOpenShiftClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	// Make a separate client for event reporting, to avoid event QPS blocking node calls
	eventClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}

	if options.NodeName == "localhost" {
		glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
	}

	clientCAs, err := kcrypto.CertPoolFromFile(options.ServingInfo.ClientCA)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	var path string
	var fileCheckInterval int64
	if options.PodManifestConfig != nil {
		path = options.PodManifestConfig.Path
		fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
	}

	var dockerExecHandler dockertools.ExecHandler

	switch options.DockerConfig.ExecHandlerName {
	case configapi.DockerExecHandlerNative:
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case configapi.DockerExecHandlerNsenter:
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	}
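	// an unrecognized handler name leaves dockerExecHandler nil; presumably
	// config validation rejects that case before this function runs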

	kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node address: %v", err)
	}
	kubePort, err := strconv.Atoi(kubePortStr)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node port: %v", err)
	}

	options.NetworkConfig.NetworkPluginName, err = validateAndGetNetworkPluginName(originClient, options.NetworkConfig.NetworkPluginName)
	if err != nil {
		return nil, err
	}

	// Defaults are tested in TestKubeletDefaults
	server := kubeletoptions.NewKubeletServer()
	// Adjust defaults
	server.Config = path
	server.RootDirectory = options.VolumeDirectory
	server.NodeIP = options.NodeIP
	server.HostnameOverride = options.NodeName
	server.AllowPrivileged = true
	server.RegisterNode = true
	server.Address = kubeAddressStr
	server.Port = uint(kubePort)
	server.ReadOnlyPort = 0        // no read only access
	server.CAdvisorPort = 0        // no unsecured cadvisor access
	server.HealthzPort = 0         // no unsecured healthz access
	server.HealthzBindAddress = "" // no unsecured healthz access
	server.ClusterDNS = options.DNSIP
	server.ClusterDomain = options.DNSDomain
	server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
	server.HostNetworkSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostPIDSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostIPCSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HTTPCheckFrequency = unversioned.Duration{Duration: time.Duration(0)} // no remote HTTP pod creation access
	server.FileCheckFrequency = unversioned.Duration{Duration: time.Duration(fileCheckInterval) * time.Second}
	server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")
	server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default
	server.MaxPods = 110
	server.SerializeImagePulls = false // disable serial image pulls by default
	if enableDNS {
		// if we are running local DNS, skydns will load the default recursive nameservers for us
		server.ResolverConfig = ""
	}

	if sdnplugin.IsOpenShiftNetworkPlugin(server.NetworkPluginName) {
		// set defaults for openshift-sdn
		server.HairpinMode = componentconfig.HairpinNone
		server.ConfigureCBR0 = false
	}

	// prevents kube from generating certs
	server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
	server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

	containerized := cmdutil.Env("OPENSHIFT_CONTAINERIZED", "") == "true"
	server.Containerized = containerized

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	proxyconfig, err := buildKubeProxyConfig(options)
	if err != nil {
		return nil, err
	}

	cfg, err := kubeletapp.UnsecuredKubeletConfig(server)
	if err != nil {
		return nil, err
	}

	// provide any config overrides
	cfg.NodeName = options.NodeName
	cfg.KubeClient = clientadapter.FromUnversionedClient(kubeClient)
	cfg.EventClient = clientadapter.FromUnversionedClient(eventClient)
	cfg.DockerExecHandler = dockerExecHandler

	// Setup auth
	authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
	if err != nil {
		return nil, err
	}
	authn, err := newAuthenticator(clientCAs, clientcmd.AnonymousClientConfig(osClientConfig), authnTTL, options.AuthConfig.AuthenticationCacheSize)
	if err != nil {
		return nil, err
	}

	authzAttr, err := newAuthorizerAttributesGetter(options.NodeName)
	if err != nil {
		return nil, err
	}

	authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL)
	if err != nil {
		return nil, err
	}
	authz, err := newAuthorizer(originClient, authzTTL, options.AuthConfig.AuthorizationCacheSize)
	if err != nil {
		return nil, err
	}

	cfg.Auth = kubeletserver.NewKubeletAuth(authn, authzAttr, authz)

	// Make sure the node doesn't think it is in standalone mode
	// This is required for the node to enforce nodeSelectors on pods, to set hostIP on pod status updates, etc
	cfg.StandaloneMode = false

	// TODO: could be cleaner
	if configapi.UseTLS(options.ServingInfo) {
		extraCerts, err := configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates)
		if err != nil {
			return nil, err
		}
		cfg.TLSOptions = &kubeletserver.TLSOptions{
			Config: crypto.SecureTLSConfig(&tls.Config{
				// RequestClientCert lets us request certs, but allow requests without client certs
				// Verification is done by the authn layer
				ClientAuth: tls.RequestClientCert,
				ClientCAs:  clientCAs,
				// Set SNI certificate func
				// Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list,
				// which we do not control when running with http.Server#ListenAndServeTLS
				GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
			}),
			CertFile: options.ServingInfo.ServerCert.CertFile,
			KeyFile:  options.ServingInfo.ServerCert.KeyFile,
		}
	} else {
		cfg.TLSOptions = nil
	}

	// Prepare cloud provider
	cloud, err := cloudprovider.InitCloudProvider(server.CloudProvider, server.CloudConfigFile)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", server.CloudProvider, server.CloudConfigFile)
	}
	cfg.Cloud = cloud

	iptablesSyncPeriod, err := time.ParseDuration(options.IPTablesSyncPeriod)
	if err != nil {
		return nil, fmt.Errorf("Cannot parse the provided ip-tables sync period (%s) : %v", options.IPTablesSyncPeriod, err)
	}
	sdnPlugin, err := sdnplugin.NewNodePlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient, options.NodeName, options.NodeIP, iptablesSyncPeriod, options.NetworkConfig.MTU)
	if err != nil {
		return nil, fmt.Errorf("SDN initialization failed: %v", err)
	}
	if sdnPlugin != nil {
		cfg.NetworkPlugins = append(cfg.NetworkPlugins, sdnPlugin)
	}

	endpointFilter, err := sdnplugin.NewProxyPlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient)
	if err != nil {
		return nil, fmt.Errorf("SDN proxy initialization failed: %v", err)
	}

	config := &NodeConfig{
		BindAddress: options.ServingInfo.BindAddress,

		AllowDisabledDocker: options.AllowDisabledDocker,
		Containerized:       containerized,

		Client: kubeClient,

		VolumeDir: options.VolumeDirectory,

		KubeletServer: server,
		KubeletConfig: cfg,

		ServicesReady: make(chan struct{}),

		ProxyConfig: proxyconfig,

		SDNPlugin:                 sdnPlugin,
		FilteringEndpointsHandler: endpointFilter,
	}

	if enableDNS {
		dnsConfig, err := dns.NewServerDefaults()
		if err != nil {
			return nil, fmt.Errorf("DNS configuration was not possible: %v", err)
		}
		if len(options.DNSIP) > 0 {
			dnsConfig.DnsAddr = options.DNSIP + ":53"
		}
		dnsConfig.Domain = server.ClusterDomain + "."
		dnsConfig.Local = "openshift.default.svc." + dnsConfig.Domain

		services, serviceStore := dns.NewCachedServiceAccessorAndStore()
		endpoints, endpointsStore := dns.NewCachedEndpointsAccessorAndStore()
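		// with the proxy disabled nothing feeds the endpoints cache, so fall
		// back to serving endpoint lookups directly from the API client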
		if !enableProxy {
			endpoints = kubeClient
			endpointsStore = nil
		}

		// TODO: use kubeletConfig.ResolverConfig as an argument to etcd in the event the
		//   user sets it, instead of passing it to the kubelet.

		config.ServiceStore = serviceStore
		config.EndpointsStore = endpointsStore
		config.DNSServer = &dns.Server{
			Config:      dnsConfig,
			Services:    services,
			Endpoints:   endpoints,
			MetricsName: "node",
		}
	}

	return config, nil
}
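A minimal caller sketch for the function above, assuming it runs in the same package (so configapi, fmt, and glog are already imported); startNodeSketch and the true/true flag values are illustrative, not part of the original API:

func startNodeSketch(nodeConfig configapi.NodeConfig) error {
	// enableProxy and enableDNS would normally be derived from the caller's
	// component selection; true/true here is illustrative only
	config, err := BuildKubernetesNodeConfig(nodeConfig, true, true)
	if err != nil {
		return fmt.Errorf("failed to build node config: %v", err)
	}
	glog.Infof("node configured to listen on %s (containerized=%v)", config.BindAddress, config.Containerized)
	return nil
}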
Example #2
func (s *KubeletExecutorServer) runKubelet(
	nodeInfos <-chan executor.NodeInfo,
	kubeletDone chan<- struct{},
	staticPodsConfigPath string,
	apiclient *clientset.Clientset,
	podLW *cache.ListWatch,
	registry executor.Registry,
	executorDone <-chan struct{},
) (err error) {
	defer func() {
		if err != nil {
			// close the channel here. When Run returns without error, the executorKubelet is
			// responsible to do this. If it returns with an error, we are responsible here.
			close(kubeletDone)
		}
	}()

	kcfg, err := kubeletapp.UnsecuredKubeletConfig(s.KubeletServer)
	if err != nil {
		return err
	}

	// apply Mesos specific settings
	kcfg.Builder = func(kc *kubeletapp.KubeletConfig) (kubeletapp.KubeletBootstrap, *kconfig.PodConfig, error) {
		k, pc, err := kubeletapp.CreateAndInitKubelet(kc)
		if err != nil {
			return k, pc, err
		}

		// decorate kubelet such that it shuts down when the executor does
		decorated := &executorKubelet{
			Kubelet:      k.(*kubelet.Kubelet),
			kubeletDone:  kubeletDone,
			executorDone: executorDone,
		}

		return decorated, pc, nil
	}
	kcfg.DockerDaemonContainer = "" // don't move the docker daemon into a cgroup
	kcfg.Hostname = kcfg.HostnameOverride
	kcfg.KubeClient = apiclient

	// taken from KubeletServer#Run(*KubeletConfig)
	eventClientConfig, err := kubeletapp.CreateAPIServerClientConfig(s.KubeletServer)
	if err != nil {
		return err
	}

	// make a separate client for events
	eventClientConfig.QPS = s.EventRecordQPS
	eventClientConfig.Burst = s.EventBurst
	kcfg.EventClient, err = clientset.NewForConfig(eventClientConfig)
	if err != nil {
		return err
	}

	kcfg.NodeName = kcfg.HostnameOverride
	kcfg.PodConfig = kconfig.NewPodConfig(kconfig.PodConfigNotificationIncremental, kcfg.Recorder) // override the default pod source
	kcfg.StandaloneMode = false
	kcfg.SystemContainer = "" // don't take control over other system processes.
	if kcfg.Cloud != nil {
		// fail early and hard because a loaded cloud provider would go unnoticed
		// but break bigger clusters: accessing state.json from every slave kills the master.
		panic("cloud provider must not be set")
	}

	// create a custom cAdvisor interface that returns the resource values Mesos reports;
	// the receive below blocks until the executor delivers the first node info
	ni := <-nodeInfos
	cAdvisorInterface, err := NewMesosCadvisor(ni.Cores, ni.Mem, s.CAdvisorPort)
	if err != nil {
		return err
	}

	kcfg.CAdvisorInterface = cAdvisorInterface
	kcfg.ContainerManager, err = cm.NewContainerManager(kcfg.Mounter, cAdvisorInterface)
	if err != nil {
		return err
	}

	go func() {
		for ni := range nodeInfos {
			// TODO(sttts): implement with MachineAllocable mechanism when https://github.com/kubernetes/kubernetes/issues/13984 is finished
			log.V(3).Infof("ignoring updated node resources: %v", ni)
		}
	}()

	// create main pod source, it will stop generating events once executorDone is closed
	newSourceMesos(executorDone, kcfg.PodConfig.Channel(mesosSource), podLW, registry)

	// create static-pods directory file source
	log.V(2).Infof("initializing static pods source factory, configured at path %q", staticPodsConfigPath)
	fileSourceUpdates := kcfg.PodConfig.Channel(kubetypes.FileSource)
	kconfig.NewSourceFile(staticPodsConfigPath, kcfg.HostnameOverride, kcfg.FileCheckFrequency, fileSourceUpdates)

	// run the kubelet
	// NOTE: because kcfg != nil holds, the upstream Run function will not
	//       initialize the cloud provider. We explicitly wouldn't want
	//       that because then every kubelet instance would query the master
	//       state.json which does not scale.
	err = kubeletapp.Run(s.KubeletServer, kcfg)
	return
}
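The deferred close at the top of runKubelet encodes an ownership handoff: on an error return the function closes kubeletDone itself, while on success the decorated kubelet takes over that duty. A self-contained sketch of the same pattern, with hypothetical names:

// runGuarded closes done itself only when start fails; on success, whatever
// start launched is responsible for closing done when it finishes.
func runGuarded(start func() error, done chan<- struct{}) (err error) {
	defer func() {
		if err != nil {
			close(done) // we never handed off, so signal completion here
		}
	}()
	err = start()
	return
}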
Example #3
func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error) {
	originClient, _, err := configapi.GetOpenShiftClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	// Make a separate client for event reporting, to avoid event QPS blocking node calls
	eventClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}

	if options.NodeName == "localhost" {
		glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
	}

	clientCAs, err := util.CertPoolFromFile(options.ServingInfo.ClientCA)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	var path string
	var fileCheckInterval int64
	if options.PodManifestConfig != nil {
		path = options.PodManifestConfig.Path
		fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
	}

	var dockerExecHandler dockertools.ExecHandler

	switch options.DockerConfig.ExecHandlerName {
	case configapi.DockerExecHandlerNative:
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case configapi.DockerExecHandlerNsenter:
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	}

	kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node address: %v", err)
	}
	kubePort, err := strconv.Atoi(kubePortStr)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node port: %v", err)
	}

	// declare the OpenShift defaults from config
	server := kubeletoptions.NewKubeletServer()
	server.Config = path
	server.RootDirectory = options.VolumeDirectory
	server.NodeIP = options.NodeIP
	server.HostnameOverride = options.NodeName
	server.AllowPrivileged = true
	server.RegisterNode = true
	server.Address = kubeAddressStr
	server.Port = uint(kubePort)
	server.ReadOnlyPort = 0 // no read only access
	server.CAdvisorPort = 0 // no unsecured cadvisor access
	server.HealthzPort = 0  // no unsecured healthz access
	server.ClusterDNS = options.DNSIP
	server.ClusterDomain = options.DNSDomain
	server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
	server.HostNetworkSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostPIDSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostIPCSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HTTPCheckFrequency = unversioned.Duration{Duration: time.Duration(0)} // no remote HTTP pod creation access
	server.FileCheckFrequency = unversioned.Duration{Duration: time.Duration(fileCheckInterval) * time.Second}
	server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")
	server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default
	server.MaxPods = 110

	// prevents kube from generating certs
	server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
	server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

	containerized := cmdutil.Env("OPENSHIFT_CONTAINERIZED", "") == "true"
	server.Containerized = containerized

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	proxyconfig, err := buildKubeProxyConfig(options)
	if err != nil {
		return nil, err
	}

	cfg, err := kubeletapp.UnsecuredKubeletConfig(server)
	if err != nil {
		return nil, err
	}

	// Replace the standard k8s emptyDir volume plugin with a wrapper version
	// which offers XFS quota functionality, but only if the node config
	// specifies an empty dir quota to apply to projects:
	if options.VolumeConfig.LocalQuota.PerFSGroup != nil {
		glog.V(2).Info("Replacing empty-dir volume plugin with quota wrapper")
		wrappedEmptyDirPlugin := false

		quotaApplicator, err := empty_dir.NewQuotaApplicator(options.VolumeDirectory)
		if err != nil {
			return nil, err
		}

		// Create a volume spec with emptyDir we can use to search for the
		// emptyDir plugin with CanSupport:
		emptyDirSpec := &volume.Spec{
			Volume: &kapi.Volume{
				VolumeSource: kapi.VolumeSource{
					EmptyDir: &kapi.EmptyDirVolumeSource{},
				},
			},
		}

		for idx, plugin := range cfg.VolumePlugins {
			// Can't really do type checking or use a constant here as they are not exported:
			if plugin.CanSupport(emptyDirSpec) {
				wrapper := empty_dir.EmptyDirQuotaPlugin{
					Wrapped:         plugin,
					Quota:           *options.VolumeConfig.LocalQuota.PerFSGroup,
					QuotaApplicator: quotaApplicator,
				}
				cfg.VolumePlugins[idx] = &wrapper
				wrappedEmptyDirPlugin = true
			}
		}
		// Because we can't look for the k8s emptyDir plugin by any means that would
		// survive a refactor, error out if we couldn't find it:
		if !wrappedEmptyDirPlugin {
			return nil, errors.New("unable to wrap emptyDir volume plugin for quota support")
		}
	} else {
		glog.V(2).Info("Skipping replacement of empty-dir volume plugin with quota wrapper, no local fsGroup quota specified")
	}

	// provide any config overrides
	cfg.NodeName = options.NodeName
	cfg.KubeClient = internalclientset.FromUnversionedClient(kubeClient)
	cfg.EventClient = internalclientset.FromUnversionedClient(eventClient)
	cfg.DockerExecHandler = dockerExecHandler

	// docker-in-docker (dind) deployments are used for testing
	// networking plugins.  Running openshift under dind won't work
	// with the real oom adjuster due to the state of the cgroups path
	// in a dind container that uses systemd for init.  Similarly,
	// cgroup manipulation of the nested docker daemon doesn't work
	// properly under centos/rhel and should be disabled by setting
	// the name of the container to an empty string.
	//
	// This workaround should become unnecessary once user namespaces are in use.
	if value := cmdutil.Env("OPENSHIFT_DIND", ""); value == "true" {
		glog.Warningf("Using FakeOOMAdjuster for docker-in-docker compatibility")
		cfg.OOMAdjuster = oom.NewFakeOOMAdjuster()
	}

	// Setup auth
	osClient, osClientConfig, err := configapi.GetOpenShiftClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
	if err != nil {
		return nil, err
	}
	authn, err := newAuthenticator(clientCAs, clientcmd.AnonymousClientConfig(osClientConfig), authnTTL, options.AuthConfig.AuthenticationCacheSize)
	if err != nil {
		return nil, err
	}

	authzAttr, err := newAuthorizerAttributesGetter(options.NodeName)
	if err != nil {
		return nil, err
	}

	authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL)
	if err != nil {
		return nil, err
	}
	authz, err := newAuthorizer(osClient, authzTTL, options.AuthConfig.AuthorizationCacheSize)
	if err != nil {
		return nil, err
	}

	cfg.Auth = kubeletserver.NewKubeletAuth(authn, authzAttr, authz)

	// Make sure the node doesn't think it is in standalone mode
	// This is required for the node to enforce nodeSelectors on pods, to set hostIP on pod status updates, etc
	cfg.StandaloneMode = false

	// TODO: could be cleaner
	if configapi.UseTLS(options.ServingInfo) {
		extraCerts, err := configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates)
		if err != nil {
			return nil, err
		}
		cfg.TLSOptions = &kubeletserver.TLSOptions{
			Config: crypto.SecureTLSConfig(&tls.Config{
				// RequestClientCert lets us request certs, but allow requests without client certs
				// Verification is done by the authn layer
				ClientAuth: tls.RequestClientCert,
				ClientCAs:  clientCAs,
				// Set SNI certificate func
				// Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list,
				// which we do not control when running with http.Server#ListenAndServeTLS
				GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
			}),
			CertFile: options.ServingInfo.ServerCert.CertFile,
			KeyFile:  options.ServingInfo.ServerCert.KeyFile,
		}
	} else {
		cfg.TLSOptions = nil
	}

	// Prepare cloud provider
	cloud, err := cloudprovider.InitCloudProvider(server.CloudProvider, server.CloudConfigFile)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", server.CloudProvider, server.CloudConfigFile)
	}
	cfg.Cloud = cloud

	sdnPlugin, endpointFilter, err := factory.NewPlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient, options.NodeName, options.NodeIP)
	if err != nil {
		return nil, fmt.Errorf("SDN initialization failed: %v", err)
	}
	if sdnPlugin != nil {
		cfg.NetworkPlugins = append(cfg.NetworkPlugins, sdnPlugin)
	}

	config := &NodeConfig{
		BindAddress: options.ServingInfo.BindAddress,

		AllowDisabledDocker: options.AllowDisabledDocker,
		Containerized:       containerized,

		Client: kubeClient,

		VolumeDir: options.VolumeDirectory,

		KubeletServer: server,
		KubeletConfig: cfg,

		ProxyConfig: proxyconfig,

		MTU: options.NetworkConfig.MTU,

		SDNPlugin:                 sdnPlugin,
		FilteringEndpointsHandler: endpointFilter,
	}

	return config, nil
}
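The emptyDir quota block in this example is a decorator applied in place: scan the plugin slice, wrap every plugin that claims support for the target spec, and fail hard if nothing matched, since the stock plugin cannot be identified by an exported type. A stripped-down sketch of that pattern, assuming the standard errors package is imported; quotaPlugin and quotaWrapper are simplified stand-ins, not the real volume.VolumePlugin interface:

type quotaPlugin interface {
	CanSupport(volumeType string) bool
}

// quotaWrapper decorates another plugin, the way EmptyDirQuotaPlugin wraps
// the stock emptyDir plugin above
type quotaWrapper struct {
	wrapped quotaPlugin
}

func (w *quotaWrapper) CanSupport(volumeType string) bool {
	return w.wrapped.CanSupport(volumeType)
}

// wrapMatchingPlugins replaces each matching entry in place and errors out
// if nothing matched, mirroring the wrappedEmptyDirPlugin bookkeeping
func wrapMatchingPlugins(plugins []quotaPlugin, volumeType string) error {
	wrapped := false
	for i, p := range plugins {
		if p.CanSupport(volumeType) {
			plugins[i] = &quotaWrapper{wrapped: p}
			wrapped = true
		}
	}
	if !wrapped {
		return errors.New("no plugin matched; cannot enforce quota")
	}
	return nil
}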