// main function for GLBC.
func main() {
	// TODO: Add a healthz endpoint
	var kubeClient *client.Client
	var err error
	var clusterManager *controller.ClusterManager

	// TODO: We can simply parse all go flags with
	// flags.AddGoFlagSet(go_flag.CommandLine)
	// but that pollutes --help output with a ton of standard go flags.
	// We only really need a binary switch from light, V(2) logging to
	// heavier debug style V(4) logging, which we use --verbose for.
	flags.Parse(os.Args)
	clientConfig := kubectl_util.DefaultClientConfig(flags)

	// Set glog verbosity levels, unconditionally set --logtostderr.
	go_flag.Lookup("logtostderr").Value.Set("true")
	if *verbose {
		go_flag.Set("v", "4")
	}
	glog.Infof("Starting GLBC image: %v, cluster name %v", imageVersion, *clusterName)
	if *defaultSvc == "" {
		glog.Fatalf("Please specify --default-backend")
	}

	// Create kubeclient
	if *inCluster {
		if kubeClient, err = client.NewInCluster(); err != nil {
			glog.Fatalf("Failed to create client: %v.", err)
		}
	} else {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		kubeClient, err = client.New(config)
		if err != nil {
			glog.Fatalf("Failed to create client: %v.", err)
		}
	}

	// Wait for the default backend Service. There's no pretty way to do this.
	parts := strings.Split(*defaultSvc, "/")
	if len(parts) != 2 {
		glog.Fatalf("Default backend should take the form namespace/name: %v", *defaultSvc)
	}
	defaultBackendNodePort, err := getNodePort(kubeClient, parts[0], parts[1])
	if err != nil {
		glog.Fatalf("Could not configure default backend %v: %v", *defaultSvc, err)
	}

	if *inCluster || *useRealCloud {
		// Create cluster manager
		namer, err := newNamer(kubeClient, *clusterName)
		if err != nil {
			glog.Fatalf("%v", err)
		}
		clusterManager, err = controller.NewClusterManager(*configFilePath, namer, defaultBackendNodePort, *healthCheckPath)
		if err != nil {
			glog.Fatalf("%v", err)
		}
	} else {
		// Create fake cluster manager
		clusterManager = controller.NewFakeClusterManager(*clusterName).ClusterManager
	}

	// Start loadbalancer controller
	lbc, err := controller.NewLoadBalancerController(kubeClient, clusterManager, *resyncPeriod, *watchNamespace)
	if err != nil {
		glog.Fatalf("%v", err)
	}
	if clusterManager.ClusterNamer.GetClusterName() != "" {
		glog.V(3).Infof("Cluster name %+v", clusterManager.ClusterNamer.GetClusterName())
	}
	clusterManager.Init(&controller.GCETranslator{lbc})
	go registerHandlers(lbc)
	go handleSigterm(lbc, *deleteAllOnQuit)

	lbc.Run()
	// Run only returns after a shutdown; keep the pod alive until it is deleted.
	for {
		glog.Infof("Handled quit, awaiting pod deletion.")
		time.Sleep(30 * time.Second)
	}
}