Example 1: main() of an HAProxy-based service load balancer.
func main() {
	clientConfig := kubectl_util.DefaultClientConfig(flags)
	flags.Parse(os.Args)
	cfg := parseCfg(*config, *lbDefAlgorithm)

	var kubeClient *unversioned.Client
	var err error

	defErrorPage := newStaticPageHandler(*errorPage, defaultErrorPage)
	if defErrorPage == nil {
		glog.Fatalf("Failed to load the default error page")
	}

	go registerHandlers(defErrorPage)

	var tcpSvcs map[string]int
	if *tcpServices != "" {
		tcpSvcs = parseTCPServices(*tcpServices)
	} else {
		glog.Infof("No tcp/https services specified")
	}

	if *startSyslog {
		cfg.startSyslog = true
		_, err = newSyslogServer("/var/run/haproxy.log.socket")
		if err != nil {
			glog.Fatalf("Failed to start syslog server: %v", err)
		}
	}

	if *cluster {
		if kubeClient, err = unversioned.NewInCluster(); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	} else {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		kubeClient, err = unversioned.New(config)
		if err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	}
	namespace, specified, err := clientConfig.Namespace()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}
	if !specified {
		namespace = api.NamespaceAll
	}

	// TODO: Handle multiple namespaces
	lbc := newLoadBalancerController(cfg, kubeClient, namespace, tcpSvcs)
	go lbc.epController.Run(util.NeverStop)
	go lbc.svcController.Run(util.NeverStop)
	if *dry {
		dryRun(lbc)
	} else {
		lbc.cfg.reload()
		util.Until(lbc.worker, time.Second, util.NeverStop)
	}
}
Example 2: main() of an NGINX Ingress controller.
func main() {
	var kubeClient *unversioned.Client
	flags.AddGoFlagSet(flag.CommandLine)
	flags.Parse(os.Args)
	clientConfig := kubectl_util.DefaultClientConfig(flags)

	glog.Infof("Using build: %v - %v", gitRepo, version)

	if *buildCfg {
		fmt.Printf("Example of ConfigMap to customize NGINX configuration:\n%v", nginx.ConfigMapAsString())
		os.Exit(0)
	}

	if *defaultSvc == "" {
		glog.Fatalf("Please specify --default-backend-service")
	}

	var err error
	if *inCluster {
		kubeClient, err = unversioned.NewInCluster()
	} else {
		config, connErr := clientConfig.ClientConfig()
		if connErr != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		kubeClient, err = unversioned.New(config)
	}
	if err != nil {
		glog.Fatalf("failed to create client: %v", err)
	}

	runtimePodInfo := &podInfo{NodeIP: "127.0.0.1"}
	if *inCluster {
		runtimePodInfo, err = getPodDetails(kubeClient)
		if err != nil {
			glog.Fatalf("unexpected error getting runtime information: %v", err)
		}
	}
	if err := isValidService(kubeClient, *defaultSvc); err != nil {
		glog.Fatalf("no service with name %v found: %v", *defaultSvc, err)
	}
	glog.Infof("Validated %v as the default backend", *defaultSvc)

	lbc, err := newLoadBalancerController(kubeClient, *resyncPeriod, *defaultSvc, *watchNamespace, *nxgConfigMap, *tcpConfigMapName, *udpConfigMapName, runtimePodInfo)
	if err != nil {
		glog.Fatalf("%v", err)
	}

	go registerHandlers(lbc)
	go handleSigterm(lbc)

	lbc.Run()

	for {
		glog.Infof("Handled quit, awaiting pod deletion")
		time.Sleep(30 * time.Second)
	}
}
Example 3: main() of an NGINX Ingress controller (in-cluster client with kubeconfig fallback).
func main() {
	flags.AddGoFlagSet(flag.CommandLine)
	flags.Parse(os.Args)
	clientConfig := kubectl_util.DefaultClientConfig(flags)

	glog.Infof("Using build: %v - %v", gitRepo, version)

	if *defaultSvc == "" {
		glog.Fatalf("Please specify --default-backend-service")
	}

	kubeClient, err := unversioned.NewInCluster()
	if err != nil {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error configuring the client: %v", err)
		}
		kubeClient, err = unversioned.New(config)
		if err != nil {
			glog.Fatalf("failed to create client: %v", err)
		}
	}

	runtimePodInfo, err := getPodDetails(kubeClient)
	if err != nil {
		runtimePodInfo = &podInfo{NodeIP: "127.0.0.1"}
		glog.Warningf("unexpected error getting runtime information: %v", err)
	}
	if err := isValidService(kubeClient, *defaultSvc); err != nil {
		glog.Fatalf("no service with name %v found: %v", *defaultSvc, err)
	}
	glog.Infof("Validated %v as the default backend", *defaultSvc)

	if *nxgConfigMap != "" {
		_, _, err = parseNsName(*nxgConfigMap)
		if err != nil {
			glog.Fatalf("configmap error: %v", err)
		}
	}

	lbc, err := newLoadBalancerController(kubeClient, *resyncPeriod,
		*defaultSvc, *watchNamespace, *nxgConfigMap, *tcpConfigMapName,
		*udpConfigMapName, *defSSLCertificate, *defHealthzURL, runtimePodInfo)
	if err != nil {
		glog.Fatalf("%v", err)
	}

	go registerHandlers(lbc)
	go handleSigterm(lbc)

	lbc.Run()

	for {
		glog.Infof("Handled quit, awaiting pod deletion")
		time.Sleep(30 * time.Second)
	}
}
Example 4: main() of a keepalived/IPVS VIP controller.
func main() {
	clientConfig := kubectl_util.DefaultClientConfig(flags)
	flags.Parse(os.Args)

	var err error
	var kubeClient *unversioned.Client

	if *cluster {
		if kubeClient, err = unversioned.NewInCluster(); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	} else {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		kubeClient, err = unversioned.New(config)
		if err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	}

	namespace, specified, err := clientConfig.Namespace()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}

	if !specified {
		namespace = ""
	}

	err = loadIPVModule()
	if err != nil {
		glog.Fatalf("Terminating execution: %v", err)
	}

	err = changeSysctl()
	if err != nil {
		glog.Fatalf("Terminating execution: %v", err)
	}

	err = resetIPVS()
	if err != nil {
		glog.Fatalf("Terminating execution: %v", err)
	}

	glog.Info("starting LVS configuration")
	if *useUnicast {
		glog.Info("keepalived will use unicast to sync the nodes")
	}
	ipvsc := newIPVSController(kubeClient, namespace, *useUnicast, *password)
	go ipvsc.epController.Run(wait.NeverStop)
	go ipvsc.svcController.Run(wait.NeverStop)
	go wait.Until(ipvsc.worker, time.Second, wait.NeverStop)

	time.Sleep(5 * time.Second)
	glog.Info("starting keepalived to announce VIPs")
	ipvsc.keepalived.Start()
}
Example 5: createKubeClient helper returning an in-cluster or kubeconfig-based client.
func createKubeClient(flags *flag.FlagSet, inCluster bool) (*kube_client.Client, error) {
	if inCluster {
		return kube_client.NewInCluster()
	}
	clientConfig := kubectl_util.DefaultClientConfig(flags)
	config, err := clientConfig.ClientConfig()
	if err != nil {
		fmt.Errorf("error connecting to the client: %v", err)
	}
	return kube_client.NewOrDie(config), nil
}
Example 6: main() of an HAProxy-based service load balancer with a process reaper and healthz endpoint.
func main() {
	flags.Parse(os.Args)
	cfg := parseCfg(*config)
	if len(*tcpServices) == 0 {
		glog.Infof("All tcp/https services will be ignored.")
	}
	go healthzServer()

	proc.StartReaper()

	var kubeClient *unversioned.Client
	var err error

	if *startSyslog {
		cfg.startSyslog = true
		_, err = newSyslogServer("/var/run/haproxy.log.socket")
		if err != nil {
			glog.Fatalf("Failed to start syslog server: %v", err)
		}
	}

	clientConfig := kubectl_util.DefaultClientConfig(flags)
	if *cluster {
		if kubeClient, err = unversioned.NewInCluster(); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	} else {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		kubeClient, err = unversioned.New(config)
		if err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	}
	namespace, specified, err := clientConfig.Namespace()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}
	if !specified {
		namespace = "default"
	}

	// TODO: Handle multiple namespaces
	lbc := newLoadBalancerController(cfg, kubeClient, namespace)
	go lbc.epController.Run(util.NeverStop)
	go lbc.svcController.Run(util.NeverStop)
	if *dry {
		dryRun(lbc)
	} else {
		lbc.cfg.reload()
		util.Until(lbc.worker, time.Second, util.NeverStop)
	}
}
Example 7: createKubeClient helper built on a restclient config.
func createKubeClient(flags *flag.FlagSet, inCluster bool) (*kube_client.Client, error) {
	var config *kube_restclient.Config
	var err error
	if inCluster {
		config, err = kube_restclient.InClusterConfig()
	} else {
		clientConfig := kubectl_util.DefaultClientConfig(flags)
		config, err = clientConfig.ClientConfig()
	}
	if err != nil {
		fmt.Errorf("error connecting to the client: %v", err)
	}
	config.ContentType = *contentType
	return kube_client.NewOrDie(config), nil
}
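
For context, here is a minimal sketch of how a helper like createKubeClient could be wired into a main function. It assumes that flag in these excerpts is an alias for github.com/spf13/pflag (kubectl_util.DefaultClientConfig takes a *pflag.FlagSet); the --in-cluster flag name and the glog call are only illustrative and do not come from the excerpts above.

func main() {
	// Hypothetical flag wiring; the flag name and default are assumptions.
	flags := flag.NewFlagSet("controller", flag.ExitOnError)
	inCluster := flags.Bool("in-cluster", true, "use the in-cluster service account")
	flags.Parse(os.Args)

	kubeClient, err := createKubeClient(flags, *inCluster)
	if err != nil {
		glog.Fatalf("failed to create kube client: %v", err)
	}
	// kubeClient would then be handed to a controller, as in the other examples.
	_ = kubeClient
}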
Example 8: makeClient helper returning an in-cluster or kubeconfig-based client.
func makeClient() (*client.Client, error) {
	var cfg *restclient.Config
	var err error

	if *inCluster {
		if cfg, err = restclient.InClusterConfig(); err != nil {
			return nil, err
		}
	} else {
		clientConfig := kubectl_util.DefaultClientConfig(flags)
		if cfg, err = clientConfig.ClientConfig(); err != nil {
			return nil, err
		}
	}
	return client.New(cfg)
}
Example 9: main() of a service load balancer that overrides the configured namespace and watches all namespaces.
func main() {
	flags.Parse(os.Args)
	cfg := parseCfg(*config)
	if len(*tcpServices) == 0 {
		glog.Infof("All tcp/https services will be ignored.")
	}
	go healthzServer()

	var kubeClient *client.Client
	var err error
	clientConfig := kubectl_util.DefaultClientConfig(flags)
	if *cluster {
		if kubeClient, err = client.NewInCluster(); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	} else {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		kubeClient, err = client.New(config)
		if err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	}
	namespace, specified, err := clientConfig.Namespace()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}
	if !specified {
		namespace = "default"
	}

	namespace = ""

	// TODO: Handle multiple namespaces
	lbc := newLoadBalancerController(cfg, kubeClient, namespace)
	go lbc.epController.Run(util.NeverStop)
	go lbc.svcController.Run(util.NeverStop)
	if *dry {
		dryRun(lbc)
	} else {
		util.Until(lbc.worker, time.Second, util.NeverStop)
	}
}
Example 10: main() of GLBC, the GCE L7 load balancer controller.
// main function for GLBC.
func main() {
	// TODO: Add a healthz endpoint
	var kubeClient *client.Client
	var err error
	var clusterManager *controller.ClusterManager
	flags.Parse(os.Args)
	clientConfig := kubectl_util.DefaultClientConfig(flags)

	// Set glog verbosity levels
	if *verbose {
		go_flag.Lookup("logtostderr").Value.Set("true")
		go_flag.Set("v", "4")
	}
	glog.Infof("Starting GLBC image: %v", imageVersion)
	if *defaultSvc == "" {
		glog.Fatalf("Please specify --default-backend")
	}

	if *proxyUrl != "" {
		// Create proxy kubeclient
		kubeClient = client.NewOrDie(&restclient.Config{
			Host:          *proxyUrl,
			ContentConfig: restclient.ContentConfig{GroupVersion: &unversioned.GroupVersion{Version: "v1"}},
		})
	} else {
		// Create kubeclient
		if *inCluster {
			if kubeClient, err = client.NewInCluster(); err != nil {
				glog.Fatalf("Failed to create client: %v.", err)
			}
		} else {
			config, err := clientConfig.ClientConfig()
			if err != nil {
				glog.Fatalf("error connecting to the client: %v", err)
			}
			kubeClient, err = client.New(config)
			if err != nil {
				glog.Fatalf("Failed to create client: %v.", err)
			}
		}
	}
	// Wait for the default backend Service. There's no pretty way to do this.
	parts := strings.Split(*defaultSvc, "/")
	if len(parts) != 2 {
		glog.Fatalf("Default backend should take the form namespace/name: %v",
			*defaultSvc)
	}
	defaultBackendNodePort, err := getNodePort(kubeClient, parts[0], parts[1])
	if err != nil {
		glog.Fatalf("Could not configure default backend %v: %v",
			*defaultSvc, err)
	}

	if *proxyUrl == "" && *inCluster {
		// Create cluster manager
		clusterManager, err = controller.NewClusterManager(
			*clusterName, defaultBackendNodePort, *healthCheckPath)
		if err != nil {
			glog.Fatalf("%v", err)
		}
	} else {
		// Create fake cluster manager
		clusterManager = controller.NewFakeClusterManager(*clusterName).ClusterManager
	}

	// Start loadbalancer controller
	lbc, err := controller.NewLoadBalancerController(kubeClient, clusterManager, *resyncPeriod, *watchNamespace)
	if err != nil {
		glog.Fatalf("%v", err)
	}
	if clusterManager.ClusterNamer.ClusterName != "" {
		glog.V(3).Infof("Cluster name %+v", clusterManager.ClusterNamer.ClusterName)
	}
	go registerHandlers(lbc)
	go handleSigterm(lbc, *deleteAllOnQuit)

	lbc.Run()
	for {
		glog.Infof("Handled quit, awaiting pod deletion.")
		time.Sleep(30 * time.Second)
	}
}
Example 11: main() of a keepalived/IPVS VIP controller driven by a services ConfigMap.
func main() {
	clientConfig := kubectl_util.DefaultClientConfig(flags)

	flags.AddGoFlagSet(flag.CommandLine)
	flags.Parse(os.Args)

	var err error
	var kubeClient *unversioned.Client

	if *configMapName == "" {
		glog.Fatalf("Please specify --services-configmap")
	}

	if *cluster {
		if kubeClient, err = unversioned.NewInCluster(); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	} else {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		kubeClient, err = unversioned.New(config)
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
	}

	namespace, specified, err := clientConfig.Namespace()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}

	if !specified {
		namespace = api.NamespaceAll
	}

	err = loadIPVModule()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}

	err = changeSysctl()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}

	err = resetIPVS()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}

	glog.Info("starting LVS configuration")
	if *useUnicast {
		glog.Info("keepalived will use unicast to sync the nodes")
	}
	ipvsc := newIPVSController(kubeClient, namespace, *useUnicast, *configMapName)
	go ipvsc.epController.Run(wait.NeverStop)
	go ipvsc.svcController.Run(wait.NeverStop)

	go ipvsc.syncQueue.run(time.Second, ipvsc.stopCh)

	go handleSigterm(ipvsc)

	glog.Info("starting keepalived to announce VIPs")
	ipvsc.keepalived.Start()
}
Example 12: main() of GLBC with a cluster namer and GCE translator.
// main function for GLBC.
func main() {
	// TODO: Add a healthz endpoint
	var kubeClient *client.Client
	var err error
	var clusterManager *controller.ClusterManager

	// TODO: We can simply parse all go flags with
	// flags.AddGoFlagSet(go_flag.CommandLine)
	// but that pollutes --help output with a ton of standard go flags.
	// We only really need a binary switch from light, v(2) logging to
	// heavier debug style V(4) logging, which we use --verbose for.
	flags.Parse(os.Args)
	clientConfig := kubectl_util.DefaultClientConfig(flags)

	// Set glog verbosity levels, unconditionally set --alsologtostderr.
	go_flag.Lookup("logtostderr").Value.Set("true")
	if *verbose {
		go_flag.Set("v", "4")
	}
	glog.Infof("Starting GLBC image: %v, cluster name %v", imageVersion, *clusterName)
	if *defaultSvc == "" {
		glog.Fatalf("Please specify --default-backend")
	}

	// Create kubeclient
	if *inCluster {
		if kubeClient, err = client.NewInCluster(); err != nil {
			glog.Fatalf("Failed to create client: %v.", err)
		}
	} else {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		kubeClient, err = client.New(config)
		if err != nil {
			glog.Fatalf("Failed to create client: %v.", err)
		}
	}
	// Wait for the default backend Service. There's no pretty way to do this.
	parts := strings.Split(*defaultSvc, "/")
	if len(parts) != 2 {
		glog.Fatalf("Default backend should take the form namespace/name: %v",
			*defaultSvc)
	}
	defaultBackendNodePort, err := getNodePort(kubeClient, parts[0], parts[1])
	if err != nil {
		glog.Fatalf("Could not configure default backend %v: %v",
			*defaultSvc, err)
	}

	if *inCluster || *useRealCloud {
		// Create cluster manager
		namer, err := newNamer(kubeClient, *clusterName)
		if err != nil {
			glog.Fatalf("%v", err)
		}
		clusterManager, err = controller.NewClusterManager(*configFilePath, namer, defaultBackendNodePort, *healthCheckPath)
		if err != nil {
			glog.Fatalf("%v", err)
		}
	} else {
		// Create fake cluster manager
		clusterManager = controller.NewFakeClusterManager(*clusterName).ClusterManager
	}

	// Start loadbalancer controller
	lbc, err := controller.NewLoadBalancerController(kubeClient, clusterManager, *resyncPeriod, *watchNamespace)
	if err != nil {
		glog.Fatalf("%v", err)
	}
	if clusterManager.ClusterNamer.GetClusterName() != "" {
		glog.V(3).Infof("Cluster name %+v", clusterManager.ClusterNamer.GetClusterName())
	}
	clusterManager.Init(&controller.GCETranslator{lbc})
	go registerHandlers(lbc)
	go handleSigterm(lbc, *deleteAllOnQuit)

	lbc.Run()
	for {
		glog.Infof("Handled quit, awaiting pod deletion.")
		time.Sleep(30 * time.Second)
	}
}
Example 13: main() of GLBC (earlier variant with a default-backend node-port flag).
// main function for GLBC.
func main() {
	// TODO: Add a healthz endpoint
	var kubeClient *client.Client
	var err error
	var clusterManager *ClusterManager
	flags.Parse(os.Args)
	clientConfig := kubectl_util.DefaultClientConfig(flags)

	if *defaultBackendNodePort == 0 {
		glog.Fatalf("Please specify --default-backend-node-port")
	}

	if *proxyUrl != "" {
		// Create proxy kubeclient
		kubeClient = client.NewOrDie(&client.Config{
			Host: *proxyUrl, Version: "v1"})
	} else {
		// Create kubeclient
		if *inCluster {
			if kubeClient, err = client.NewInCluster(); err != nil {
				glog.Fatalf("Failed to create client: %v.", err)
			}
		} else {
			config, err := clientConfig.ClientConfig()
			if err != nil {
				glog.Fatalf("error connecting to the client: %v", err)
			}
			kubeClient, err = client.New(config)
			if err != nil {
				glog.Fatalf("Failed to create client: %v.", err)
			}
		}
	}
	if *proxyUrl == "" && *inCluster {
		// Create cluster manager
		clusterManager, err = NewClusterManager(
			*clusterName, *defaultBackendNodePort, *healthCheckPath)
		if err != nil {
			glog.Fatalf("%v", err)
		}
	} else {
		// Create fake cluster manager
		fcm, err := newFakeClusterManager(*clusterName)
		if err != nil {
			glog.Fatalf("%v", err)
		}
		clusterManager = fcm.ClusterManager
	}

	// Start loadbalancer controller
	lbc, err := NewLoadBalancerController(kubeClient, clusterManager, *resyncPeriod)
	if err != nil {
		glog.Fatalf("%v", err)
	}
	glog.Infof("Created lbc %+v", lbc)
	go registerHandlers(lbc)
	if *deleteAllOnQuit {
		go handleSigterm(lbc)
	}
	lbc.Run()
	for {
		glog.Infof("Handled quit, awaiting pod deletion.")
		time.Sleep(30 * time.Second)
	}
}
Example 14: NewIngressController, the constructor of the generic Ingress controller.
// NewIngressController returns a configured Ingress controller
func NewIngressController(backend ingress.Controller) *GenericController {
	var (
		flags = pflag.NewFlagSet("", pflag.ExitOnError)

		defaultSvc = flags.String("default-backend-service", "",
			`Service used to serve a 404 page for the default backend. Takes the form
    	namespace/name. The controller uses the first node port of this Service for
    	the default backend.`)

		ingressClass = flags.String("ingress-class", "",
			`Name of the ingress class to route through this controller.`)

		configMap = flags.String("configmap", "",
			`Name of the ConfigMap that contains the custom configuration to use`)

		publishSvc = flags.String("publish-service", "",
			`Service fronting the ingress controllers. Takes the form
 		namespace/name. The controller will set the endpoint records on the
 		ingress objects to reflect those on the service.`)

		tcpConfigMapName = flags.String("tcp-services-configmap", "",
			`Name of the ConfigMap that contains the definition of the TCP services to expose.
		The key in the map indicates the external port to be used. The value is the name of the
		service with the format namespace/serviceName and the port of the service can be a
		number or the name of the port.
		Ports 80 and 443 are not allowed as external ports; these ports are reserved for the backend.`)

		udpConfigMapName = flags.String("udp-services-configmap", "",
			`Name of the ConfigMap that contains the definition of the UDP services to expose.
		The key in the map indicates the external port to be used. The value is the name of the
		service with the format namespace/serviceName and the port of the service can be a
		number or the name of the port.`)

		resyncPeriod = flags.Duration("sync-period", 60*time.Second,
			`Relist and confirm cloud resources this often.`)

		watchNamespace = flags.String("watch-namespace", api.NamespaceAll,
			`Namespace to watch for Ingress. Default is to watch all namespaces`)

		healthzPort = flags.Int("healthz-port", 10254, "port for healthz endpoint.")

		profiling = flags.Bool("profiling", true, `Enable profiling via web interface host:port/debug/pprof/`)

		defSSLCertificate = flags.String("default-ssl-certificate", "", `Name of the secret
		that contains an SSL certificate to be used as the default for the HTTPS catch-all server`)

		defHealthzURL = flags.String("health-check-path", "/healthz", `Defines 
		the URL to be used as the health check inside the default server in NGINX.`)
	)

	flags.AddGoFlagSet(flag.CommandLine)
	flags.Parse(os.Args)
	clientConfig := kubectl_util.DefaultClientConfig(flags)

	flag.Set("logtostderr", "true")

	glog.Info(backend.Info())

	if *ingressClass != "" {
		glog.Infof("Watching for ingress class: %s", *ingressClass)
	}

	if *defaultSvc == "" {
		glog.Fatalf("Please specify --default-backend-service")
	}

	kubeconfig, err := restclient.InClusterConfig()
	if err != nil {
		kubeconfig, err = clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error configuring the client: %v", err)
		}
	}

	kubeClient, err := clientset.NewForConfig(kubeconfig)
	if err != nil {
		glog.Fatalf("failed to create client: %v", err)
	}

	_, err = k8s.IsValidService(kubeClient, *defaultSvc)
	if err != nil {
		glog.Fatalf("no service with name %v found: %v", *defaultSvc, err)
	}
	glog.Infof("validated %v as the default backend", *defaultSvc)

	if *publishSvc != "" {
		svc, err := k8s.IsValidService(kubeClient, *publishSvc)
		if err != nil {
			glog.Fatalf("no service with name %v found: %v", *publishSvc, err)
		}

		if len(svc.Status.LoadBalancer.Ingress) == 0 {
			// We could poll here, but we instead just exit and rely on k8s to restart us
			glog.Fatalf("service %s does not (yet) have ingress points", *publishSvc)
		}

		glog.Infof("service %v validated as source of Ingress status", *publishSvc)
	}

	if *configMap != "" {
		_, _, err = k8s.ParseNameNS(*configMap)
		if err != nil {
			glog.Fatalf("configmap error: %v", err)
		}
	}

	os.MkdirAll(ingress.DefaultSSLDirectory, 0655)

	config := &Configuration{
		Client:                kubeClient,
		ResyncPeriod:          *resyncPeriod,
		DefaultService:        *defaultSvc,
		IngressClass:          *ingressClass,
		Namespace:             *watchNamespace,
		ConfigMapName:         *configMap,
		TCPConfigMapName:      *tcpConfigMapName,
		UDPConfigMapName:      *udpConfigMapName,
		DefaultSSLCertificate: *defSSLCertificate,
		DefaultHealthzURL:     *defHealthzURL,
		PublishService:        *publishSvc,
		Backend:               backend,
	}

	ic := newIngressController(config)
	go registerHandlers(*profiling, *healthzPort, ic)
	return ic
}
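
As an illustration of the --tcp-services-configmap format described in the flag help above, a matching ConfigMap could be constructed with the same api package used in these examples. The service name, namespace, and ports below are invented for the example, not taken from the excerpts.

// buildTCPServicesConfigMap returns a hypothetical ConfigMap in the
// --tcp-services-configmap format: each key is the external port to expose;
// each value is namespace/serviceName:port, where the port may be a number or
// a named port. Ports 80 and 443 are reserved and may not be used as keys.
func buildTCPServicesConfigMap() *api.ConfigMap {
	return &api.ConfigMap{
		ObjectMeta: api.ObjectMeta{Name: "tcp-services", Namespace: "default"},
		Data: map[string]string{
			// external port 9000 -> port 8080 of service "example-go" in "default"
			"9000": "default/example-go:8080",
		},
	}
}

The namespace/name of such a ConfigMap is what would be passed through the --tcp-services-configmap flag.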