func main() {
	flags.Parse(os.Args)
	if *defaultSvc == "" {
		glog.Fatalf("Please specify --default-backend")
	}
	kubeClient, err := unversioned.NewInCluster()
	if err != nil {
		glog.Fatalf("failed to create client: %v", err)
	}
	lbInfo, _ := getLBDetails(kubeClient)
	defSvc := getService(kubeClient, *defaultSvc)
	defError := getService(kubeClient, *customErrorSvc)

	// Start loadbalancer controller
	lbc, err := NewLoadBalancerController(kubeClient, *resyncPeriod, defSvc, defError, *watchNamespace, lbInfo)
	if err != nil {
		glog.Fatalf("%v", err)
	}
	lbc.Run()
	for {
		glog.Infof("Handled quit, awaiting pod deletion.")
		time.Sleep(30 * time.Second)
	}
}
func main() {
	clientConfig := kubectl_util.DefaultClientConfig(flags)
	flags.Parse(os.Args)
	cfg := parseCfg(*config, *lbDefAlgorithm)

	var kubeClient *unversioned.Client
	var err error

	defErrorPage := newStaticPageHandler(*errorPage, defaultErrorPage)
	if defErrorPage == nil {
		glog.Fatalf("Failed to load the default error page")
	}
	go registerHandlers(defErrorPage)

	var tcpSvcs map[string]int
	if *tcpServices != "" {
		tcpSvcs = parseTCPServices(*tcpServices)
	} else {
		glog.Infof("No tcp/https services specified")
	}

	if *startSyslog {
		cfg.startSyslog = true
		_, err = newSyslogServer("/var/run/haproxy.log.socket")
		if err != nil {
			glog.Fatalf("Failed to start syslog server: %v", err)
		}
	}

	if *cluster {
		if kubeClient, err = unversioned.NewInCluster(); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	} else {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		if kubeClient, err = unversioned.New(config); err != nil {
			glog.Fatalf("failed to create client: %v", err)
		}
	}
	namespace, specified, err := clientConfig.Namespace()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}
	if !specified {
		namespace = api.NamespaceAll
	}

	// TODO: Handle multiple namespaces
	lbc := newLoadBalancerController(cfg, kubeClient, namespace, tcpSvcs)
	go lbc.epController.Run(util.NeverStop)
	go lbc.svcController.Run(util.NeverStop)
	if *dry {
		dryRun(lbc)
	} else {
		lbc.cfg.reload()
		util.Until(lbc.worker, time.Second, util.NeverStop)
	}
}
func main() {
	kubeClient, err := client.NewInCluster()
	if err != nil {
		log.Fatalf("Failed to create client: %v", err)
	}
	listAll := api.ListOptions{LabelSelector: labels.Everything(), FieldSelector: fields.Everything()}
	nodes, err := kubeClient.Nodes().List(listAll)
	if err != nil {
		log.Fatalf("Failed to list nodes: %v", err)
	}
	log.Printf("Nodes:")
	for _, node := range nodes.Items {
		log.Printf("\t%v", node.Name)
	}
	services, err := kubeClient.Services(api.NamespaceDefault).List(listAll)
	if err != nil {
		log.Fatalf("Failed to list services: %v", err)
	}
	log.Printf("Services:")
	for _, svc := range services.Items {
		log.Printf("\t%v", svc.Name)
	}
	log.Printf("Success")
	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "Ok")
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
// Run runs the git-receive hook. This func is effectively the main for the
// git-receive hook, although it is called from the main in boot.go.
func Run(conf *Config, fs sys.FS, env sys.Env, storageDriver storagedriver.StorageDriver) error {
	log.Debug("Running git hook")
	builderKey, err := builderconf.GetBuilderKey()
	if err != nil {
		return err
	}
	kubeClient, err := client.NewInCluster()
	if err != nil {
		return fmt.Errorf("couldn't reach the api server (%s)", err)
	}
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		line := scanner.Text()
		oldRev, newRev, refName, err := readLine(line)
		if err != nil {
			return fmt.Errorf("reading STDIN (%s)", err)
		}
		log.Debug("read [%s,%s,%s]", oldRev, newRev, refName)
		// if we're processing a receive-pack on an existing repo, run a build
		if strings.HasPrefix(conf.SSHOriginalCommand, "git-receive-pack") {
			if err := build(conf, storageDriver, kubeClient, fs, env, builderKey, newRev); err != nil {
				return err
			}
		}
	}
	return scanner.Err()
}
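// readLine is referenced above but defined elsewhere in the original
// package. A hedged sketch of what such a helper could look like, assuming
// the standard git receive hook stdin format "<old-rev> <new-rev> <ref-name>";
// the helper's exact shape in the real code may differ:
//
//	func readLine(line string) (string, string, string, error) {
//		parts := strings.Split(line, " ")
//		if len(parts) != 3 {
//			return "", "", "", fmt.Errorf("malformed hook input (%s)", line)
//		}
//		return parts[0], parts[1], parts[2], nil
//	}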
func main() { var ingClient client.IngressInterface if kubeClient, err := client.NewInCluster(); err != nil { log.Fatalf("Failed to create client: %v.", err) } else { ingClient = kubeClient.Extensions().Ingress(os.Getenv("INGRESS_NAMESPACE")) } tmpl, _ := template.New("haproxy").Parse(haproxyConf) rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1) known := &extensions.IngressList{} // Controller loop shellOut("haproxy -f /etc/haproxy/haproxy.cfg -p /var/run/haproxy-private.pid") for { rateLimiter.Accept() ingresses, err := ingClient.List(api.ListOptions{}) if err != nil { log.Printf("Error retrieving ingresses: %v", err) continue } if reflect.DeepEqual(ingresses.Items, known.Items) { log.Printf("Nothing Has Changed") continue } known = ingresses if w, err := os.Create("/etc/haproxy/haproxy.cfg"); err != nil { log.Fatalf("Failed to open %v: %v", haproxyConf, err) defer w.Close() } else if err := tmpl.Execute(w, ingresses); err != nil { log.Fatalf("Failed to write template %v", err) } restartHaproxy("haproxy_reload") } }
// Find all sibling pods in the service and post to their /write handler.
func contactOthers(state *State) {
	const waitTimeout = 2 * time.Minute
	defer state.doneContactingPeers()
	client, err := client.NewInCluster()
	if err != nil {
		log.Fatalf("Unable to create client; error: %v\n", err)
	}
	// Double-check that the client works by getting the server version.
	if v, err := client.Discovery().ServerVersion(); err != nil {
		log.Fatalf("Unable to get server version: %v\n", err)
	} else {
		log.Printf("Server version: %#v\n", v)
	}

	// Wait for the expected number of webserver endpoints to appear.
	for start := time.Now(); time.Since(start) < waitTimeout; time.Sleep(5 * time.Second) {
		eps := getWebserverEndpoints(client)
		if eps.Len() >= *peerCount {
			break
		}
		state.Logf("%v/%v has %v endpoints, which is less than %v as expected. Waiting for all endpoints to come up.", *namespace, *service, len(eps), *peerCount)
	}

	// Do this repeatedly, in case there's some propagation delay with getting
	// newly started pods into the endpoints list.
	for i := 0; i < 15; i++ {
		eps := getWebserverEndpoints(client)
		for ep := range eps {
			state.Logf("Attempting to contact %s", ep)
			contactSingle(ep, state)
		}
		time.Sleep(5 * time.Second)
	}
}
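// getWebserverEndpoints is referenced above but defined elsewhere in the
// original package. A hedged sketch under the assumption that it returns a
// sets.String of peer URLs built from the service's endpoint addresses;
// names and the URL scheme are illustrative, not taken from the source:
//
//	func getWebserverEndpoints(c *client.Client) sets.String {
//		eps := sets.String{}
//		endpoints, err := c.Endpoints(*namespace).Get(*service)
//		if err != nil {
//			return eps
//		}
//		for _, ss := range endpoints.Subsets {
//			for _, a := range ss.Addresses {
//				for _, p := range ss.Ports {
//					eps.Insert(fmt.Sprintf("http://%s:%d", a.IP, p.Port))
//				}
//			}
//		}
//		return eps
//	}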
func main() { var ingClient client.IngressInterface if kubeClient, err := client.NewInCluster(); err != nil { log.Fatalf("Failed to create client: %v.", err) } else { ingClient = kubeClient.Extensions().Ingress(api.NamespaceAll) } tmpl, _ := template.New("nginx").Parse(nginxConf) rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1) known := &extensions.IngressList{} // Controller loop shellOut("nginx") for { rateLimiter.Accept() ingresses, err := ingClient.List(labels.Everything(), fields.Everything()) if err != nil || reflect.DeepEqual(ingresses.Items, known.Items) { continue } known = ingresses if w, err := os.Create("/etc/nginx/nginx.conf"); err != nil { log.Fatalf("Failed to open %v: %v", nginxConf, err) } else if err := tmpl.Execute(w, ingresses); err != nil { log.Fatalf("Failed to write template %v", err) } shellOut("nginx -s reload") } }
func main() {
	nginx.Start()
	kubeClient, err := client.NewInCluster()
	if err != nil {
		log.Fatalf("Failed to create client: %v.", err)
	}
	rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1)
	known := &model.RouterConfig{}

	// Main loop
	for {
		rateLimiter.Accept()
		routerConfig, err := model.Build(kubeClient)
		if err != nil {
			log.Printf("Error building model; not modifying certs or configuration: %v.", err)
			continue
		}
		if reflect.DeepEqual(routerConfig, known) {
			continue
		}
		log.Println("INFO: Router configuration has changed in k8s.")
		err = nginx.WriteCerts(routerConfig, "/opt/nginx/ssl")
		if err != nil {
			log.Printf("Failed to write certs; continuing with existing certs and configuration: %v", err)
			continue
		}
		err = nginx.WriteConfig(routerConfig, "/opt/nginx/conf/nginx.conf")
		if err != nil {
			log.Printf("Failed to write new nginx configuration; continuing with existing configuration: %v", err)
			continue
		}
		nginx.Reload()
		known = routerConfig
	}
}
func main() { var ingClient client.IngressInterface if kubeClient, err := client.NewInCluster(); err != nil { log.Fatalf("Failed to create client: %v.", err) } else { ingClient = kubeClient.Extensions().Ingress("devops-test") } tmpl, _ := template.New("nginx").Parse(nginxConf) rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1) known := &extensions.IngressList{} // Controller loop shellOut("nginx") for { rateLimiter.Accept() ingresses, err := ingClient.List(api.ListOptions{}) if err != nil { log.Printf("Error retrieving ingresses: %v", err) continue } if reflect.DeepEqual(ingresses.Items, known.Items) { continue } known = ingresses if w, err := os.Create("/etc/nginx/nginx.conf"); err != nil { log.Fatalf("Failed to open %v: %v", nginxConf, err) } else if err := tmpl.Execute(w, ingresses); err != nil { log.Fatalf("Failed to write template %v", err) } restartNginx("nginx -t") } }
func runJoinCluster(cmd *cobra.Command, args []string) error {
	cli, err := client.NewInCluster()
	if err != nil {
		return fmt.Errorf("unable to connect k8s api server: %v", err)
	}
	labelSelector, err := labels.Parse(influxSelectors)
	if err != nil {
		return fmt.Errorf("unable to parse labels: %v", err)
	}
	fieldSelector := fields.Everything()
	podIPs, err := podIps(cli, labelSelector, fieldSelector)
	if err != nil {
		return err
	}
	hostIP, err := externalIP()
	if err != nil {
		return err
	}
	peers := influxdbPeers(hostIP, podIPs)
	iOpts := influxdOpts(hostIP, peers)
	return ioutil.WriteFile(envVarFile, []byte(iOpts), 0644)
}
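// A hedged sketch of how runJoinCluster could be wired into a cobra command;
// the command name and help text here are hypothetical, not taken from the
// original source:
//
//	var joinClusterCmd = &cobra.Command{
//		Use:   "join-cluster",
//		Short: "Discover influxdb peers via the k8s API and write the influxd env file",
//		RunE:  runJoinCluster,
//	}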
func (fs Filesystem) checksyncstatus(path string) error {
	path = strings.TrimPrefix(path, "/home/minio")
	path = "mnt/minio/data" + path
	var lock sync.RWMutex
	nosync := make(map[string]bool)
	kubeClient, err := client.NewInCluster()
	if err != nil {
		return fmt.Errorf("unable to create client: %v", err)
	}
	pclient := kubeClient.Pods("default")
	selector, _ := labels.Parse("app=minio-sync")
	list, err := pclient.List(selector, nil)
	if err != nil {
		return fmt.Errorf("list pods failed: %v", err)
	}
	for _, pod := range list.Items {
		fmt.Println(pod.Status.PodIP)
		if pod.Status.Phase == "Running" {
			nosync[pod.Status.PodIP] = false
		}
	}
	allsync := true
	for duration := 1; duration < 60; duration++ {
		timeperiod := time.Second * time.Duration(duration)
		fmt.Println(timeperiod)
		time.Sleep(timeperiod)
		// Snapshot the unsynced IPs before spawning the checkers so the map
		// is never iterated while the goroutines write to it, and guard all
		// shared state with the lock (the original raced on allsync).
		pending := []string{}
		lock.RLock()
		for ip, synced := range nosync {
			if !synced {
				pending = append(pending, ip)
			}
		}
		lock.RUnlock()
		var wg sync.WaitGroup
		wg.Add(len(pending))
		for _, ip := range pending {
			go func(ip string) {
				defer wg.Done()
				if doCurl("http://" + ip + ":3000/" + path) {
					lock.Lock()
					nosync[ip] = true
					lock.Unlock()
				} else {
					lock.Lock()
					allsync = false
					lock.Unlock()
				}
			}(ip)
		}
		wg.Wait()
		if allsync {
			break
		}
		allsync = true
	}
	for _, synced := range nosync {
		if !synced {
			return fmt.Errorf("sync failed: peers did not catch up in time")
		}
	}
	return nil
}
func main() {
	var ingClient client.IngressInterface
	var secretsClient client.SecretsInterface
	/* Anon http client
	config := client.Config{
		Host:     "http://localhost:8080",
		Username: "******",
		Password: "******",
	}
	kubeClient, err := client.New(&config)
	*/
	kubeClient, err := client.NewInCluster()
	if err != nil {
		log.Fatalf("Failed to create client: %v.", err)
	} else {
		ingClient = kubeClient.Extensions().Ingress(api.NamespaceAll)
		secretsClient = kubeClient.Secrets(api.NamespaceAll)
	}
	tmpl := template.New("nginx.tmpl").Funcs(template.FuncMap{"hasprefix": hasPrefix, "hassuffix": hasSuffix})
	if _, err := tmpl.ParseFiles("./nginx.tmpl"); err != nil {
		log.Fatalf("Failed to parse template %v", err)
	}
	rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1)
	known := &extensions.IngressList{}
	known_secrets := &api.SecretList{}

	// Controller loop
	shellOut("nginx")
	for {
		rateLimiter.Accept()
		ingresses, err := ingClient.List(api.ListOptions{})
		if err != nil {
			log.Printf("Error retrieving ingresses: %v", err)
			continue
		}
		secrets, err := secretsClient.List(api.ListOptions{})
		if err != nil {
			log.Printf("Error retrieving secrets: %v", err)
			continue
		}
		if reflect.DeepEqual(ingresses.Items, known.Items) && reflect.DeepEqual(secrets.Items, known_secrets.Items) {
			continue
		}
		// Remember the current state and build the template context.
		known = ingresses
		known_secrets = secrets
		context := &Context{Ingress: ingresses, Secrets: secrets}
		if w, err := os.Create("/etc/nginx/nginx.conf"); err != nil {
			log.Fatalf("Failed to open /etc/nginx/nginx.conf: %v", err)
		} else if err := tmpl.Execute(w, context); err != nil {
			log.Fatalf("Failed to write template %v", err)
		}
		shellOut("nginx -s reload")
	}
}
func main() {
	var kubeClient *unversioned.Client
	flags.AddGoFlagSet(flag.CommandLine)
	flags.Parse(os.Args)
	clientConfig := kubectl_util.DefaultClientConfig(flags)

	glog.Infof("Using build: %v - %v", gitRepo, version)

	if *buildCfg {
		fmt.Printf("Example of ConfigMap to customize NGINX configuration:\n%v", nginx.ConfigMapAsString())
		os.Exit(0)
	}

	if *defaultSvc == "" {
		glog.Fatalf("Please specify --default-backend-service")
	}

	var err error
	if *inCluster {
		kubeClient, err = unversioned.NewInCluster()
	} else {
		config, connErr := clientConfig.ClientConfig()
		if connErr != nil {
			glog.Fatalf("error connecting to the client: %v", connErr)
		}
		kubeClient, err = unversioned.New(config)
	}
	if err != nil {
		glog.Fatalf("failed to create client: %v", err)
	}

	runtimePodInfo := &podInfo{NodeIP: "127.0.0.1"}
	if *inCluster {
		runtimePodInfo, err = getPodDetails(kubeClient)
		if err != nil {
			glog.Fatalf("unexpected error getting runtime information: %v", err)
		}
	}
	if err := isValidService(kubeClient, *defaultSvc); err != nil {
		glog.Fatalf("no service with name %v found: %v", *defaultSvc, err)
	}
	glog.Infof("Validated %v as the default backend", *defaultSvc)

	lbc, err := newLoadBalancerController(kubeClient, *resyncPeriod, *defaultSvc, *watchNamespace, *nxgConfigMap, *tcpConfigMapName, *udpConfigMapName, runtimePodInfo)
	if err != nil {
		glog.Fatalf("%v", err)
	}

	go registerHandlers(lbc)
	go handleSigterm(lbc)
	lbc.Run()
	for {
		glog.Infof("Handled quit, awaiting pod deletion")
		time.Sleep(30 * time.Second)
	}
}
func main() { flag.Parse() glog.Info("Kubernetes Elasticsearch logging discovery") c, err := client.NewInCluster() if err != nil { glog.Fatalf("Failed to make client: %v", err) } namespace := api.NamespaceSystem envNamespace := os.Getenv("NAMESPACE") if envNamespace != "" { if _, err := c.Namespaces().Get(envNamespace); err != nil { glog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err) } namespace = envNamespace } var elasticsearch *api.Service // Look for endpoints associated with the Elasticsearch loggging service. // First wait for the service to become available. for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) { elasticsearch, err = c.Services(namespace).Get("elasticsearch-logging") if err == nil { break } } // If we did not find an elasticsearch logging service then log a warning // and return without adding any unicast hosts. if elasticsearch == nil { glog.Warningf("Failed to find the elasticsearch-logging service: %v", err) return } var endpoints *api.Endpoints addrs := []string{} // Wait for some endpoints. count := 0 for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) { endpoints, err = c.Endpoints(namespace).Get("elasticsearch-logging") if err != nil { continue } addrs = flattenSubsets(endpoints.Subsets) glog.Infof("Found %s", addrs) if len(addrs) > 0 && len(addrs) == count { break } count = len(addrs) } // If there was an error finding endpoints then log a warning and quit. if err != nil { glog.Warningf("Error finding endpoints: %v", err) return } glog.Infof("Endpoints = %s", addrs) fmt.Printf("discovery.zen.ping.unicast.hosts: [%s]\n", strings.Join(addrs, ", ")) }
func main() {
	flags.AddGoFlagSet(flag.CommandLine)
	flags.Parse(os.Args)
	clientConfig := kubectl_util.DefaultClientConfig(flags)

	glog.Infof("Using build: %v - %v", gitRepo, version)

	if *defaultSvc == "" {
		glog.Fatalf("Please specify --default-backend-service")
	}

	kubeClient, err := unversioned.NewInCluster()
	if err != nil {
		// Not running inside a cluster; fall back to the local client config.
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error configuring the client: %v", err)
		}
		kubeClient, err = unversioned.New(config)
		if err != nil {
			glog.Fatalf("failed to create client: %v", err)
		}
	}

	runtimePodInfo, err := getPodDetails(kubeClient)
	if err != nil {
		runtimePodInfo = &podInfo{NodeIP: "127.0.0.1"}
		glog.Warningf("unexpected error getting runtime information: %v", err)
	}
	if err := isValidService(kubeClient, *defaultSvc); err != nil {
		glog.Fatalf("no service with name %v found: %v", *defaultSvc, err)
	}
	glog.Infof("Validated %v as the default backend", *defaultSvc)

	if *nxgConfigMap != "" {
		_, _, err = parseNsName(*nxgConfigMap)
		if err != nil {
			glog.Fatalf("configmap error: %v", err)
		}
	}

	lbc, err := newLoadBalancerController(kubeClient, *resyncPeriod, *defaultSvc, *watchNamespace, *nxgConfigMap, *tcpConfigMapName, *udpConfigMapName, *defSSLCertificate, *defHealthzURL, runtimePodInfo)
	if err != nil {
		glog.Fatalf("%v", err)
	}

	go registerHandlers(lbc)
	go handleSigterm(lbc)

	lbc.Run()
	for {
		glog.Infof("Handled quit, awaiting pod deletion")
		time.Sleep(30 * time.Second)
	}
}
func main() {
	clientConfig := kubectl_util.DefaultClientConfig(flags)
	flags.Parse(os.Args)

	var err error
	var kubeClient *unversioned.Client
	if *cluster {
		if kubeClient, err = unversioned.NewInCluster(); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	} else {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		if kubeClient, err = unversioned.New(config); err != nil {
			glog.Fatalf("failed to create client: %v", err)
		}
	}
	namespace, specified, err := clientConfig.Namespace()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}
	if !specified {
		namespace = ""
	}

	err = loadIPVModule()
	if err != nil {
		glog.Fatalf("Terminating execution: %v", err)
	}
	err = changeSysctl()
	if err != nil {
		glog.Fatalf("Terminating execution: %v", err)
	}
	err = resetIPVS()
	if err != nil {
		glog.Fatalf("Terminating execution: %v", err)
	}

	glog.Info("starting LVS configuration")
	if *useUnicast {
		glog.Info("keepalived will use unicast to sync the nodes")
	}
	ipvsc := newIPVSController(kubeClient, namespace, *useUnicast, *password)
	go ipvsc.epController.Run(wait.NeverStop)
	go ipvsc.svcController.Run(wait.NeverStop)
	go wait.Until(ipvsc.worker, time.Second, wait.NeverStop)

	time.Sleep(5 * time.Second)
	glog.Info("starting keepalived to announce VIPs")
	ipvsc.keepalived.Start()
}
func main() { var ingClient client.IngressInterface if kubeClient, err := client.NewInCluster(); err != nil { log.Fatalf("Failed to create client: %v.", err) } else { ingClient = kubeClient.Extensions().Ingress(api.NamespaceAll) } tmpl, _ := template.New("nginx").Parse(nginxConf) tmplSSL, _ := template.New("nginx").Parse(nginxSSLConf) rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1) known := &extensions.IngressList{} // Controller loop //shellOut("nginx") for { rateLimiter.Accept() ingresses, err := ingClient.List(api.ListOptions{}) fmt.Printf("Port %+v\n", ingresses.Items[0].Spec.Rules[0].HTTP.Paths[0].Backend.ServicePort.IntValue()) fmt.Printf("Path %+v\n", ingresses.Items[0].Spec.Rules[0].HTTP.Paths[0].Path) var httpIngressList extensions.IngressList var httpsIngressList extensions.IngressList for _, v := range ingresses.Items { fmt.Printf("%#v\n", v) if v.Spec.Rules[0].HTTP.Paths[0].Backend.ServicePort.IntVal == 80 { httpIngressList.Items = append(httpIngressList.Items, v) } else if v.Spec.Rules[0].HTTP.Paths[0].Backend.ServicePort.IntVal == 443 { httpsIngressList.Items = append(httpsIngressList.Items, v) } } if err != nil { log.Printf("Error retrieving ingresses: %v", err) continue } if reflect.DeepEqual(ingresses.Items, known.Items) { continue } known = ingresses if w, err := os.Create("/etc/nginx/conf.d/appcloud.conf"); err != nil { log.Fatalf("Failed to open %v: %v", nginxConf, err) } else if err := tmpl.Execute(w, httpIngressList); err != nil { log.Fatalf("Failed to write template %v", err) } if w, err := os.Create("/etc/nginx/conf.d/appcloud-ssl.conf"); err != nil { log.Fatalf("Failed to open %v: %v", nginxSSLConf, err) } else if err := tmplSSL.Execute(w, httpsIngressList); err != nil { log.Fatalf("Failed to write template %v", err) } // shellOut("nginx -s reload") } }
func createKubeClient(flags *flag.FlagSet, inCluster bool) (*kube_client.Client, error) {
	if inCluster {
		return kube_client.NewInCluster()
	}
	clientConfig := kubectl_util.DefaultClientConfig(flags)
	config, err := clientConfig.ClientConfig()
	if err != nil {
		// The original discarded this fmt.Errorf result; return it instead.
		return nil, fmt.Errorf("error connecting to the client: %v", err)
	}
	return kube_client.NewOrDie(config), nil
}
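// A hedged usage sketch for createKubeClient above; the flag set construction
// and the flag name are illustrative, not taken from the original source:
//
//	fs := flag.NewFlagSet("controller", flag.ExitOnError)
//	inCluster := fs.Bool("running-in-cluster", true, "use the in-cluster service account")
//	fs.Parse(os.Args[1:])
//	kubeClient, err := createKubeClient(fs, *inCluster)
//	if err != nil {
//		glog.Fatalf("failed to create client: %v", err)
//	}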
func main() {
	kubeClient, err := kcl.NewInCluster()
	if err != nil {
		log.Fatalf("Error creating new Kubernetes client (%s)", err)
	}
	apiClient, err := config.GetSwaggerClient(config.Spec.VersionsAPIURL)
	if err != nil {
		log.Fatalf("Error creating new swagger api client (%s)", err)
	}
	deisK8sResources := k8s.NewResourceInterfaceNamespaced(kubeClient, config.Spec.DeisNamespace)
	clusterID := data.NewClusterIDFromPersistentStorage(deisK8sResources.Secrets())
	installedDeisData := data.NewInstalledDeisData(deisK8sResources)
	availableVersion := data.NewAvailableVersionsFromAPI(
		apiClient,
		config.Spec.VersionsAPIURL,
	)
	availableComponentVersion := data.NewLatestReleasedComponent(deisK8sResources, availableVersion)
	pollDur := time.Duration(config.Spec.Polling) * time.Second

	// we want to do the following jobs according to our remote API interval:
	// 1. get latest stable deis component versions
	// 2. send diagnostic data, if appropriate
	glvdPeriodic := jobs.NewGetLatestVersionDataPeriodic(
		installedDeisData,
		clusterID,
		availableVersion,
		availableComponentVersion,
		pollDur,
	)
	svPeriodic := jobs.NewSendVersionsPeriodic(
		apiClient,
		clusterID,
		deisK8sResources,
		availableVersion,
		pollDur,
	)
	toDo := []jobs.Periodic{glvdPeriodic, svPeriodic}
	log.Printf("Starting periodic jobs at interval %s", pollDur)
	ch := jobs.DoPeriodic(toDo)
	defer close(ch)

	// Get a new router, with handler functions
	r := handlers.RegisterRoutes(mux.NewRouter(), availableVersion, deisK8sResources)
	// Bind to a port and pass our router in
	hostStr := fmt.Sprintf(":%s", config.Spec.Port)
	log.Printf("Serving on %s", hostStr)
	if err := http.ListenAndServe(hostStr, r); err != nil {
		close(ch)
		log.Println("Unable to open up TLS listener")
		log.Fatal("ListenAndServe: ", err)
	}
}
func main() {
	flags.Parse(os.Args)
	cfg := parseCfg(*config)
	if len(*tcpServices) == 0 {
		glog.Infof("All tcp/https services will be ignored.")
	}
	go healthzServer()

	proc.StartReaper()

	var kubeClient *unversioned.Client
	var err error
	if *startSyslog {
		cfg.startSyslog = true
		_, err = newSyslogServer("/var/run/haproxy.log.socket")
		if err != nil {
			glog.Fatalf("Failed to start syslog server: %v", err)
		}
	}
	clientConfig := kubectl_util.DefaultClientConfig(flags)
	if *cluster {
		if kubeClient, err = unversioned.NewInCluster(); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	} else {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		if kubeClient, err = unversioned.New(config); err != nil {
			glog.Fatalf("failed to create client: %v", err)
		}
	}
	namespace, specified, err := clientConfig.Namespace()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}
	if !specified {
		namespace = "default"
	}

	// TODO: Handle multiple namespaces
	lbc := newLoadBalancerController(cfg, kubeClient, namespace)
	go lbc.epController.Run(util.NeverStop)
	go lbc.svcController.Run(util.NeverStop)
	if *dry {
		dryRun(lbc)
	} else {
		lbc.cfg.reload()
		util.Until(lbc.worker, time.Second, util.NeverStop)
	}
}
func getKubeClient(env *appEnv) (*kube.Client, error) {
	kubeClient, err := kube.NewInCluster()
	if err == nil {
		return kubeClient, nil
	}
	protolion.Errorf("Falling back to insecure kube client due to error from NewInCluster: %s", err.Error())
	config := &kube.Config{
		Host:     fmt.Sprintf("%s:443", env.KubeAddress),
		Insecure: true,
	}
	return kube.New(config)
}
func main() { flag.Parse() glog.Info("k8s programmatically create pods test.") c, err := k8s_client.NewInCluster() if err != nil { glog.Fatalf("Failed to make client: %v", err) } if err != nil { glog.Fatalf("Failed to make client: %v", err) } test_service := &k8s_api.Service{} // var test_pod *k8s_api.Pod test_service.Labels = make(map[string]string) test_service.Labels["name"] = "test" test_service.Name = "test" test_service.Spec.Selector = make(map[string]string) service_port := []k8s_api.ServicePort{ k8s_api.ServicePort{ Protocol: k8s_api.ProtocolTCP, Port: 8080, }, } test_service.Spec.Ports = service_port if _, err := c.Services("default").Create(test_service); err != nil { glog.Errorf("Failed to create service due to: %v", err) } s, err := NewSupervisor(c, 5*time.Second, k8s_api.NamespaceAll) s.StartPodManager() go s.podController.Run(s.stopCh) for { if !s.podController.HasSynced() { glog.Warning("Controller not synced yet!") } pods := s.podLister.Store.List() glog.Infof("List get %v pods", len(pods)) for _, obj := range pods { pod := obj.(*k8s_api.Pod) glog.Infof("Get Pod: %v/%v", pod.Namespace, pod.Name) } time.Sleep(20 * time.Second) } }
func getKubeClient() (*kube.Client, error) {
	kubeClient, err := kube.NewInCluster()
	if err == nil {
		return kubeClient, nil
	}
	protolog.Errorf("Falling back to insecure kube client due to error from NewInCluster: %s", err.Error())
	kubeAddr, err := getKubeAddress()
	if err != nil {
		return nil, err
	}
	config := &kube.Config{
		Host:     kubeAddr,
		Insecure: true,
	}
	return kube.New(config)
}
func getKubeClient() (*kube.Client, error) {
	kubeAddr, err := getKubeAddress()
	if err != nil {
		return nil, err
	}
	config := &kube.Config{
		Host:     kubeAddr,
		Insecure: true,
	}
	kubeClient, err := kube.New(config)
	if err != nil {
		protolog.Printf("Error creating insecure kube client: %s", err.Error())
	}
	if kubeClient != nil {
		return kubeClient, nil
	}
	return kube.NewInCluster()
}
func main() {
	flags.AddGoFlagSet(flag.CommandLine)
	flags.Parse(os.Args)

	if *buildCfg {
		fmt.Printf("Example of ConfigMap to customize NGINX configuration:\n%v", nginx.ConfigMapAsString())
		os.Exit(0)
	}

	if *defaultSvc == "" {
		glog.Fatalf("Please specify --default-backend")
	}

	kubeClient, err := unversioned.NewInCluster()
	if err != nil {
		glog.Fatalf("failed to create client: %v", err)
	}

	lbInfo, err := getLBDetails(kubeClient)
	if err != nil {
		glog.Fatalf("unexpected error getting runtime information: %v", err)
	}
	err = isValidService(kubeClient, *defaultSvc)
	if err != nil {
		glog.Fatalf("no service with name %v found: %v", *defaultSvc, err)
	}

	lbc, err := newLoadBalancerController(kubeClient, *resyncPeriod, *defaultSvc, *watchNamespace, *nxgConfigMap, *tcpConfigMapName, *udpConfigMapName, lbInfo)
	if err != nil {
		glog.Fatalf("%v", err)
	}

	go registerHandlers(lbc)
	lbc.Run()
	for {
		glog.Infof("Handled quit, awaiting pod deletion")
		time.Sleep(30 * time.Second)
	}
}
func do(appEnvObj interface{}) error {
	appEnv := appEnvObj.(*appEnv)
	rethinkAPIClient, err := getRethinkAPIClient(appEnv.DatabaseAddress, appEnv.DatabaseName)
	if err != nil {
		return err
	}
	pfsdAddress, err := getPfsdAddress()
	if err != nil {
		return err
	}
	clientConn, err := grpc.Dial(pfsdAddress, grpc.WithInsecure())
	if err != nil {
		return err
	}
	pfsAPIClient := pfs.NewAPIClient(clientConn)
	kubeClient, err := kube.NewInCluster()
	if err != nil {
		protolog.Printf("Error creating kubernetes client: %s", err.Error())
	}
	jobAPIServer := jobserver.NewAPIServer(
		pfsAPIClient,
		rethinkAPIClient,
		kubeClient,
	)
	jobAPIClient := pps.NewLocalJobAPIClient(jobAPIServer)
	pipelineAPIServer := pipelineserver.NewAPIServer(pfsAPIClient, jobAPIClient, rethinkAPIClient)
	if err := pipelineAPIServer.Start(); err != nil {
		return err
	}
	return protoserver.Serve(
		uint16(appEnv.Port),
		func(s *grpc.Server) {
			pps.RegisterJobAPIServer(s, jobAPIServer)
			pps.RegisterPipelineAPIServer(s, pipelineAPIServer)
		},
		protoserver.ServeOptions{
			DebugPort: uint16(appEnv.DebugPort),
			Version:   pachyderm.Version,
		},
	)
}
func newKubeClient(config *Config) (kubeClient *kube.Client, kubeErr error) {
	if config.KubernetesURL == "" {
		kubeClient, kubeErr = kube.NewInCluster()
	} else {
		kubeConfig := &kube.Config{
			Host:     config.KubernetesURL,
			Username: config.KubernetesUsername,
			Password: config.KubernetesPassword,
			Insecure: true,
		}
		kubeClient, kubeErr = kube.New(kubeConfig)
	}
	if kubeErr != nil {
		glog.Errorf("Failed to create kubernetes client. Error: %v\n", kubeErr)
		kubeClient = nil
	}
	return
}
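// A hedged usage sketch for newKubeClient above; the Config values are
// placeholders, not real endpoints or credentials:
//
//	cfg := &Config{
//		KubernetesURL:      "https://k8s.example.com:6443",
//		KubernetesUsername: "admin",
//		KubernetesPassword: "password",
//	}
//	kubeClient, kubeErr := newKubeClient(cfg)
//	if kubeErr != nil {
//		// fall back or abort; newKubeClient has already logged the error
//	}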
func main() { flags.AddGoFlagSet(flag.CommandLine) flags.Parse(os.Args) if *defaultSvc == "" { glog.Fatalf("Please specify --default-backend") } glog.Info("Checking if DNS is working") ip, err := checkDNS(*defaultSvc) if err != nil { glog.Fatalf("Please check if the DNS addon is working properly.\n%v", err) } glog.Infof("IP address of '%v' service: %s", *defaultSvc, ip) kubeClient, err := unversioned.NewInCluster() if err != nil { glog.Fatalf("failed to create client: %v", err) } lbInfo, _ := getLBDetails(kubeClient) defSvc, err := getService(kubeClient, *defaultSvc) if err != nil { glog.Fatalf("no default backend service found: %v", err) } defError, _ := getService(kubeClient, *customErrorSvc) // Start loadbalancer controller lbc, err := NewLoadBalancerController(kubeClient, *resyncPeriod, defSvc, defError, *watchNamespace, lbInfo) if err != nil { glog.Fatalf("%v", err) } lbc.Run() for { glog.Infof("Handled quit, awaiting pod deletion.") time.Sleep(30 * time.Second) } }
// http://qiita.com/dtan4/items/f2f30207e0acec454c3d
func GetClusterInfo() (string, error) {
	kubeClient, err := client.NewInCluster()
	if err != nil {
		return "", err
	}
	pods, err := kubeClient.Pods(kapi.NamespaceDefault).List(kapi.ListOptions{
		LabelSelector: labels.SelectorFromSet(map[string]string{
			"name": "chunk",
			"env":  "staging",
		}),
	})
	if err != nil {
		return "", err
	}
	podDescs := make([]string, len(pods.Items))
	for ix, pod := range pods.Items {
		podDescs[ix] = fmt.Sprintf("%s : %s : %s", pod.Status.PodIP, pod.Status.Phase, pod.Name)
	}
	return strings.Join(podDescs, "\n"), nil
}
func main() {
	flags.Parse(os.Args)

	proc.StartReaper()

	var kubeClient *client.Client
	if *cluster {
		clusterClient, err := client.NewInCluster()
		if err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
		kubeClient = clusterClient
	} else {
		config := &client.Config{
			Host: *master,
		}
		confClient, err := client.New(config)
		if err != nil {
			glog.Fatalf("Could not create api client %v", err)
		}
		kubeClient = confClient
	}

	err := haproxy.StartSyslogServer("/var/run/haproxy.log.sock")
	if err != nil {
		glog.Fatalf("Failed to start syslog server: %v", err)
	}

	lbc := newLoadBalancerController(kubeClient, "default", *domain, strings.Split(*nodes, ","))

	go healthzServer()
	go lbc.epController.Run(util.NeverStop)
	go lbc.svcController.Run(util.NeverStop)
	util.Until(lbc.worker, time.Second, util.NeverStop)
}