// TODO(jdef): hacked from qinglet/server/server.go
// TODO(k8s): replace this with clientcmd
func (s *SchedulerServer) createAPIServerClient() (*client.Client, error) {
	authInfo, err := clientauth.LoadFromFile(s.AuthPath)
	if err != nil {
		log.Warningf("Could not load qingyuan auth path: %v. Continuing with defaults.", err)
	}
	if authInfo == nil {
		// authInfo didn't load correctly - continue with defaults.
		authInfo = &clientauth.Info{}
	}
	clientConfig, err := authInfo.MergeWithConfig(client.Config{})
	if err != nil {
		return nil, err
	}
	if len(s.APIServerList) < 1 {
		return nil, fmt.Errorf("no api servers specified")
	}
	// TODO: adapt Qing client to support LB over several servers
	if len(s.APIServerList) > 1 {
		log.Infof("Multiple api servers specified. Picking first one")
	}
	clientConfig.Host = s.APIServerList[0]
	c, err := client.New(&clientConfig)
	if err != nil {
		return nil, err
	}
	return c, nil
}
// TODO: evaluate using pkg/client/clientcmd
func newQingClient() (*kclient.Client, error) {
	var (
		config    *kclient.Config
		err       error
		masterURL string
	)
	if *argQingMasterURL != "" {
		masterURL, err = getQingMasterURL()
		if err != nil {
			return nil, err
		}
	}
	if *argQingcfgFile == "" {
		if masterURL == "" {
			return nil, fmt.Errorf("--qing_master_url must be set when --qingcfg_file is not set")
		}
		config = &kclient.Config{
			Host:    masterURL,
			Version: "v1beta3",
		}
	} else {
		overrides := &kclientcmd.ConfigOverrides{}
		if masterURL != "" {
			overrides.ClusterInfo.Server = masterURL
		}
		if config, err = kclientcmd.NewNonInteractiveDeferredLoadingClientConfig(
			&kclientcmd.ClientConfigLoadingRules{ExplicitPath: *argQingcfgFile},
			overrides).ClientConfig(); err != nil {
			return nil, err
		}
	}
	glog.Infof("Using %s for qingyuan master", config.Host)
	glog.Infof("Using qingyuan API %s", config.Version)
	return kclient.New(config)
}
func main() { flag.Parse() glog.Info("Elasticsearch discovery") apiServer := *server if apiServer == "" { qingyuanService := os.Getenv("QINGYUAN_SERVICE_HOST") if qingyuanService == "" { glog.Fatalf("Please specify the QingYuan server with --server") } apiServer = fmt.Sprintf("https://%s:%s", qingyuanService, os.Getenv("QINGYUAN_SERVICE_PORT")) } glog.Infof("Server: %s", apiServer) glog.Infof("Namespace: %q", *namespace) glog.Infof("selector: %q", *selector) config := client.Config{ Host: apiServer, BearerToken: *token, Insecure: true, } c, err := client.New(&config) if err != nil { glog.Fatalf("Failed to make client: %v", err) } l, err := labels.Parse(*selector) if err != nil { glog.Fatalf("Failed to parse selector %q: %v", *selector, err) } pods, err := c.Pods(*namespace).List(l, fields.Everything()) if err != nil { glog.Fatalf("Failed to list pods: %v", err) } glog.Infof("Elasticsearch pods in namespace %s with selector %q", *namespace, *selector) podIPs := []string{} for i := range pods.Items { p := &pods.Items[i] for attempt := 0; attempt < 10; attempt++ { glog.Infof("%d: %s PodIP: %s", i, p.Name, p.Status.PodIP) if p.Status.PodIP != "" { podIPs = append(podIPs, fmt.Sprintf(`"%s"`, p.Status.PodIP)) break } time.Sleep(1 * time.Second) p, err = c.Pods(*namespace).Get(p.Name) if err != nil { glog.Warningf("Failed to get pod %s: %v", p.Name, err) } } if p.Status.PodIP == "" { glog.Warningf("Failed to obtain PodIP for %s", p.Name) } } fmt.Printf("discovery.zen.ping.unicast.hosts: [%s]\n", strings.Join(podIPs, ", ")) }
// Run runs the specified SchedulerServer. This should never exit.
func (s *SchedulerServer) Run(_ []string) error {
	if s.Qingconfig == "" && s.Master == "" {
		glog.Warningf("Neither --qingconfig nor --master was specified. Using default API client. This might not work.")
	}

	// This creates a client, first loading any specified qingconfig
	// file, and then overriding the Master flag, if non-empty.
	qingconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Qingconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	qingconfig.QPS = 20.0
	qingconfig.Burst = 30

	qingClient, err := client.New(qingconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	configFactory := factory.NewConfigFactory(qingClient)
	config, err := s.createConfig(configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(qingClient.Events(""))

	sched := scheduler.New(config)
	sched.Run()

	select {}
}
// ClientForVersion initializes or reuses a client for the specified version, or returns an
// error if that is not possible
func (c *clientCache) ClientForVersion(version string) (*client.Client, error) {
	if client, ok := c.clients[version]; ok {
		return client, nil
	}
	config, err := c.ClientConfigForVersion(version)
	if err != nil {
		return nil, err
	}
	client, err := client.New(config)
	if err != nil {
		return nil, err
	}
	c.clients[config.Version] = client
	return client, nil
}
// Run runs the CMServer. This should never exit.
func (s *CMServer) Run(_ []string) error {
	if s.Qingconfig == "" && s.Master == "" {
		glog.Warningf("Neither --qingconfig nor --master was specified. Using default API client. This might not work.")
	}

	// This creates a client, first loading any specified qingconfig
	// file, and then overriding the Master flag, if non-empty.
	qingconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Qingconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	qingconfig.QPS = 20.0
	qingconfig.Burst = 30

	qingClient, err := client.New(qingconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	endpoints := service.NewEndpointController(qingClient)
	go endpoints.Run(s.ConcurrentEndpointSyncs, util.NeverStop)

	controllerManager := replicationControllerPkg.NewReplicationManager(qingClient, replicationControllerPkg.BurstReplicas)
	go controllerManager.Run(s.ConcurrentRCSyncs, util.NeverStop)

	cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)

	if s.SyncNodeStatus {
		glog.Warning("DEPRECATION NOTICE: sync-node-status flag is being deprecated. It has no effect now and it will be removed in a future version.")
	}

	nodeController := nodecontroller.NewNodeController(cloud, qingClient, s.RegisterRetryCount,
		s.PodEvictionTimeout, nodecontroller.NewPodEvictor(util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst)),
		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod,
		(*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
	nodeController.Run(s.NodeSyncPeriod)

	serviceController := servicecontroller.New(cloud, qingClient, s.ClusterName)
	if err := serviceController.Run(s.NodeSyncPeriod); err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	}

	if s.AllocateNodeCIDRs {
		routes, ok := cloud.Routes()
		if !ok {
			glog.Fatal("Cloud provider must support routes if allocate-node-cidrs is set")
		}
		routeController := routecontroller.New(routes, qingClient, s.ClusterName, (*net.IPNet)(&s.ClusterCIDR))
		routeController.Run(s.NodeSyncPeriod)
	}

	resourceQuotaManager := resourcequota.NewResourceQuotaManager(qingClient)
	resourceQuotaManager.Run(s.ResourceQuotaSyncPeriod)

	namespaceManager := namespace.NewNamespaceManager(qingClient, s.NamespaceSyncPeriod)
	namespaceManager.Run()

	pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(qingClient, s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()

	pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(qingClient, s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins())
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()

	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccount.NewTokensController(
				qingClient,
				serviceaccount.DefaultTokenControllerOptions(
					serviceaccount.JWTTokenGenerator(privateKey),
				),
			).Run()
		}
	}

	serviceaccount.NewServiceAccountsController(
		qingClient,
		serviceaccount.DefaultServiceAccountsControllerOptions(),
	).Run()

	// Block forever; select{} is a terminating statement, so no return is needed.
	select {}
}
// Run runs the specified QingletExecutorServer.
func (s *QingletExecutorServer) Run(hks hyperqing.Interface, _ []string) error {
	rand.Seed(time.Now().UTC().UnixNano())

	if err := util.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		log.Info(err)
	}

	var apiclient *client.Client
	clientConfig, err := s.CreateAPIServerClientConfig()
	if err == nil {
		apiclient, err = client.New(clientConfig)
	}
	if err != nil {
		// required for k8sm since we need to send api.Binding information
		// back to the apiserver
		log.Fatalf("No API client: %v", err)
	}

	log.Infof("Using root directory: %v", s.RootDirectory)
	credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)

	shutdownCloser, err := s.syncExternalShutdownWatcher()
	if err != nil {
		return err
	}

	cadvisorInterface, err := cadvisor.New(s.CadvisorPort)
	if err != nil {
		return err
	}

	imageGCPolicy := qinglet.ImageGCPolicy{
		HighThresholdPercent: s.ImageGCHighThresholdPercent,
		LowThresholdPercent:  s.ImageGCLowThresholdPercent,
	}

	diskSpacePolicy := qinglet.DiskSpacePolicy{
		DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
		RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
	}

	//TODO(jdef) intentionally NOT initializing a cloud provider here since:
	//(a) the qinglet doesn't actually use it
	//(b) we don't need to create N-qinglet connections to zookeeper for no good reason
	//cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	//log.Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)

	hostNetworkSources, err := qinglet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
	if err != nil {
		return err
	}

	tlsOptions, err := s.InitializeTLS()
	if err != nil {
		return err
	}

	mounter := mount.New()
	if s.Containerized {
		log.V(2).Info("Running qinglet in containerized mode (experimental)")
		mounter = &mount.NsenterMounter{}
	}

	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		log.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	kcfg := app.QingletConfig{
		Address:            s.Address,
		AllowPrivileged:    s.AllowPrivileged,
		HostNetworkSources: hostNetworkSources,
		HostnameOverride:   s.HostnameOverride,
		RootDirectory:      s.RootDirectory,
		// ConfigFile: ""
		// ManifestURL: ""
		FileCheckFrequency: s.FileCheckFrequency,
		// HTTPCheckFrequency
		PodInfraContainerImage:  s.PodInfraContainerImage,
		SyncFrequency:           s.SyncFrequency,
		RegistryPullQPS:         s.RegistryPullQPS,
		RegistryBurst:           s.RegistryBurst,
		MinimumGCAge:            s.MinimumGCAge,
		MaxPerPodContainerCount: s.MaxPerPodContainerCount,
		MaxContainerCount:       s.MaxContainerCount,
		RegisterNode:            s.RegisterNode,
		// StandaloneMode: false
		ClusterDomain:                  s.ClusterDomain,
		ClusterDNS:                     s.ClusterDNS,
		Runonce:                        s.RunOnce,
		Port:                           s.Port,
		ReadOnlyPort:                   s.ReadOnlyPort,
		CadvisorInterface:              cadvisorInterface,
		EnableServer:                   s.EnableServer,
		EnableDebuggingHandlers:        s.EnableDebuggingHandlers,
		DockerClient:                   dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		QingClient:                     apiclient,
		MasterServiceNamespace:         s.MasterServiceNamespace,
		VolumePlugins:                  app.ProbeVolumePlugins(),
		NetworkPlugins:                 app.ProbeNetworkPlugins(),
		NetworkPluginName:              s.NetworkPluginName,
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
		TLSOptions:                     tlsOptions,
		ImageGCPolicy:                  imageGCPolicy,
		DiskSpacePolicy:                diskSpacePolicy,
		// TODO(jdef) Cloud: specifying null here because we don't want all
		// qinglets polling mesos-master; need to account for this in the
		// cloudprovider impl
		Cloud:                     nil,
		NodeStatusUpdateFrequency: s.NodeStatusUpdateFrequency,
		ResourceContainer:         s.ResourceContainer,
		CgroupRoot:                s.CgroupRoot,
		ContainerRuntime:          s.ContainerRuntime,
		Mounter:                   mounter,
		DockerDaemonContainer:     s.DockerDaemonContainer,
		SystemContainer:           s.SystemContainer,
		ConfigureCBR0:             s.ConfigureCBR0,
		MaxPods:                   s.MaxPods,
		DockerExecHandler:         dockerExecHandler,
	}

	kcfg.NodeName = kcfg.Hostname

	err = app.RunQinglet(&kcfg, app.QingletBuilder(func(kc *app.QingletConfig) (app.QingletBootstrap, *kconfig.PodConfig, error) {
		return s.createAndInitQinglet(kc, hks, clientConfig, shutdownCloser)
	}))
	if err != nil {
		return err
	}

	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		go util.Forever(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				log.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}

	// block until executor is shut down or commits shutdown
	select {}
}
// Run runs the specified QingletServer. This should never exit.
func (s *QingletServer) Run(_ []string) error {
	util.ReallyCrash = s.ReallyCrashForTesting
	rand.Seed(time.Now().UTC().UnixNano())

	// TODO(vmarmol): Do this through container config.
	if err := util.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		glog.Warning(err)
	}

	var apiclient *client.Client
	clientConfig, err := s.CreateAPIServerClientConfig()
	if err == nil {
		apiclient, err = client.New(clientConfig)
	}
	if err != nil && len(s.APIServerList) > 0 {
		glog.Warningf("No API client: %v", err)
	}

	glog.V(2).Infof("Using root directory: %v", s.RootDirectory)
	credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)

	cadvisorInterface, err := cadvisor.New(s.CadvisorPort)
	if err != nil {
		return err
	}

	imageGCPolicy := qinglet.ImageGCPolicy{
		HighThresholdPercent: s.ImageGCHighThresholdPercent,
		LowThresholdPercent:  s.ImageGCLowThresholdPercent,
	}

	diskSpacePolicy := qinglet.DiskSpacePolicy{
		DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
		RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
	}

	cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)

	hostNetworkSources, err := qinglet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
	if err != nil {
		return err
	}

	tlsOptions, err := s.InitializeTLS()
	if err != nil {
		return err
	}

	mounter := mount.New()
	if s.Containerized {
		glog.V(2).Info("Running qinglet in containerized mode (experimental)")
		mounter = &mount.NsenterMounter{}
	}

	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		glog.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	kcfg := QingletConfig{
		Address:                        s.Address,
		AllowPrivileged:                s.AllowPrivileged,
		HostNetworkSources:             hostNetworkSources,
		HostnameOverride:               s.HostnameOverride,
		RootDirectory:                  s.RootDirectory,
		ConfigFile:                     s.Config,
		ManifestURL:                    s.ManifestURL,
		FileCheckFrequency:             s.FileCheckFrequency,
		HTTPCheckFrequency:             s.HTTPCheckFrequency,
		PodInfraContainerImage:         s.PodInfraContainerImage,
		SyncFrequency:                  s.SyncFrequency,
		RegistryPullQPS:                s.RegistryPullQPS,
		RegistryBurst:                  s.RegistryBurst,
		MinimumGCAge:                   s.MinimumGCAge,
		MaxPerPodContainerCount:        s.MaxPerPodContainerCount,
		MaxContainerCount:              s.MaxContainerCount,
		RegisterNode:                   s.RegisterNode,
		StandaloneMode:                 (len(s.APIServerList) == 0),
		ClusterDomain:                  s.ClusterDomain,
		ClusterDNS:                     s.ClusterDNS,
		Runonce:                        s.RunOnce,
		Port:                           s.Port,
		ReadOnlyPort:                   s.ReadOnlyPort,
		CadvisorInterface:              cadvisorInterface,
		EnableServer:                   s.EnableServer,
		EnableDebuggingHandlers:        s.EnableDebuggingHandlers,
		DockerClient:                   dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		QingClient:                     apiclient,
		MasterServiceNamespace:         s.MasterServiceNamespace,
		VolumePlugins:                  ProbeVolumePlugins(),
		NetworkPlugins:                 ProbeNetworkPlugins(),
		NetworkPluginName:              s.NetworkPluginName,
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
		TLSOptions:                     tlsOptions,
		ImageGCPolicy:                  imageGCPolicy,
		DiskSpacePolicy:                diskSpacePolicy,
		Cloud:                          cloud,
		NodeStatusUpdateFrequency:      s.NodeStatusUpdateFrequency,
		ResourceContainer:              s.ResourceContainer,
		CgroupRoot:                     s.CgroupRoot,
		ContainerRuntime:               s.ContainerRuntime,
		Mounter:                        mounter,
		DockerDaemonContainer:          s.DockerDaemonContainer,
		SystemContainer:                s.SystemContainer,
		ConfigureCBR0:                  s.ConfigureCBR0,
		PodCIDR:                        s.PodCIDR,
		MaxPods:                        s.MaxPods,
		DockerExecHandler:              dockerExecHandler,
	}

	if err := RunQinglet(&kcfg, nil); err != nil {
		return err
	}

	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		go util.Forever(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}

	if s.RunOnce {
		return nil
	}

	// run forever
	select {}
}
func main() { flag.Parse() glog.Infof("Starting serve_hostnames soak test with queries=%d and podsPerNode=%d upTo=%d", *queriesAverage, *podsPerNode, *upTo) var spec string if *gke != "" { spec = filepath.Join(os.Getenv("HOME"), ".config", "gcloud", "qingyuan", "qingconfig") } else { spec = filepath.Join(os.Getenv("HOME"), ".qing", "config") } settings, err := clientcmd.LoadFromFile(spec) if err != nil { glog.Fatalf("Error loading configuration: %v", err.Error()) } if *gke != "" { settings.CurrentContext = *gke } config, err := clientcmd.NewDefaultClientConfig(*settings, &clientcmd.ConfigOverrides{}).ClientConfig() if err != nil { glog.Fatalf("Failed to construct config: %v", err) } c, err := client.New(config) if err != nil { glog.Fatalf("Failed to make client: %v", err) } var nodes *api.NodeList for start := time.Now(); time.Since(start) < nodeListTimeout; time.Sleep(2 * time.Second) { nodes, err = c.Nodes().List(labels.Everything(), fields.Everything()) if err == nil { break } glog.Warningf("Failed to list nodes: %v", err) } if err != nil { glog.Fatalf("Giving up trying to list nodes: %v", err) } if len(nodes.Items) == 0 { glog.Fatalf("Failed to find any nodes.") } glog.Infof("Found %d nodes on this cluster:", len(nodes.Items)) for i, node := range nodes.Items { glog.Infof("%d: %s", i, node.Name) } queries := *queriesAverage * len(nodes.Items) * *podsPerNode // Create the namespace got, err := c.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{GenerateName: "serve-hostnames-"}}) if err != nil { glog.Fatalf("Failed to create namespace: %v", err) } ns := got.Name defer func(ns string) { if err := c.Namespaces().Delete(ns); err != nil { glog.Warningf("Failed to delete namespace ns: %e", ns, err) } }(ns) glog.Infof("Created namespace %s", ns) // Create a service for these pods. glog.Infof("Creating service %s/serve-hostnames", ns) // Make several attempts to create a service. var svc *api.Service for start := time.Now(); time.Since(start) < serviceCreateTimeout; time.Sleep(2 * time.Second) { t := time.Now() svc, err = c.Services(ns).Create(&api.Service{ ObjectMeta: api.ObjectMeta{ Name: "serve-hostnames", Labels: map[string]string{ "name": "serve-hostname", }, }, Spec: api.ServiceSpec{ Ports: []api.ServicePort{{ Protocol: "TCP", Port: 9376, TargetPort: util.NewIntOrStringFromInt(9376), }}, Selector: map[string]string{ "name": "serve-hostname", }, }, }) glog.V(4).Infof("Service create %s/server-hostnames took %v", ns, time.Since(t)) if err == nil { break } glog.Warningf("After %v failed to create service %s/serve-hostnames: %v", time.Since(start), ns, err) } if err != nil { glog.Warningf("Unable to create service %s/%s: %v", ns, svc.Name, err) return } // Clean up service defer func() { glog.Infof("Cleaning up service %s/serve-hostnames", ns) // Make several attempts to delete the service. for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) { if err := c.Services(ns).Delete(svc.Name); err == nil { return } glog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err) } }() // Put serve-hostname pods on each node. 
podNames := []string{} for i, node := range nodes.Items { for j := 0; j < *podsPerNode; j++ { podName := fmt.Sprintf("serve-hostname-%d-%d", i, j) podNames = append(podNames, podName) // Make several attempts for start := time.Now(); time.Since(start) < podCreateTimeout; time.Sleep(2 * time.Second) { glog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name) t := time.Now() _, err = c.Pods(ns).Create(&api.Pod{ ObjectMeta: api.ObjectMeta{ Name: podName, Labels: map[string]string{ "name": "serve-hostname", }, }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: "serve-hostname", Image: "qingyuan/serve_hostname:1.1", Ports: []api.ContainerPort{{ContainerPort: 9376}}, }, }, NodeName: node.Name, }, }) glog.V(4).Infof("Pod create %s/%s request took %v", ns, podName, time.Since(t)) if err == nil { break } glog.Warningf("After %s failed to create pod %s/%s: %v", time.Since(start), ns, podName, err) } if err != nil { glog.Warningf("Failed to create pod %s/%s: %v", ns, podName, err) return } } } // Clean up the pods defer func() { glog.Info("Cleaning up pods") // Make several attempts to delete the pods. for _, podName := range podNames { for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) { if err = c.Pods(ns).Delete(podName, nil); err == nil { break } glog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err) } } }() glog.Info("Waiting for the serve-hostname pods to be ready") for _, podName := range podNames { var pod *api.Pod for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) { pod, err = c.Pods(ns).Get(podName) if err != nil { glog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, err, podStartTimeout) continue } if pod.Status.Phase == api.PodRunning { break } } if pod.Status.Phase != api.PodRunning { glog.Warningf("Gave up waiting on pod %s/%s to be running (saw %v)", ns, podName, pod.Status.Phase) } else { glog.Infof("%s/%s is running", ns, podName) } } // Wait for the endpoints to propagate. for start := time.Now(); time.Since(start) < endpointTimeout; time.Sleep(10 * time.Second) { hostname, err := c.Get(). Namespace(ns). Prefix("proxy"). Resource("services"). Name("serve-hostnames"). DoRaw() if err != nil { glog.Infof("After %v while making a proxy call got error %v", time.Since(start), err) continue } var r api.Status if err := api.Scheme.DecodeInto(hostname, &r); err != nil { break } if r.Status == api.StatusFailure { glog.Infof("After %v got status %v", time.Since(start), string(hostname)) continue } break } // Repeatedly make requests. for iteration := 0; iteration != *upTo; iteration++ { responseChan := make(chan string, queries) // Use a channel of size *maxPar to throttle the number // of in-flight requests to avoid overloading the service. inFlight := make(chan struct{}, *maxPar) start := time.Now() for q := 0; q < queries; q++ { go func(i int, query int) { inFlight <- struct{}{} t := time.Now() hostname, err := c.Get(). Namespace(ns). Prefix("proxy"). Resource("services"). Name("serve-hostnames"). DoRaw() glog.V(4).Infof("Proxy call in namespace %s took %v", ns, time.Since(t)) if err != nil { glog.Warningf("Call failed during iteration %d query %d : %v", i, query, err) // If the query failed return a string which starts with a character // that can't be part of a hostname. 
responseChan <- fmt.Sprintf("!failed in iteration %d to issue query %d: %v", i, query, err) } else { responseChan <- string(hostname) } <-inFlight }(iteration, q) } responses := make(map[string]int, *podsPerNode*len(nodes.Items)) missing := 0 for q := 0; q < queries; q++ { r := <-responseChan glog.V(4).Infof("Got response from %s", r) responses[r]++ // If the returned hostname starts with '!' then it indicates // an error response. if len(r) > 0 && r[0] == '!' { glog.V(3).Infof("Got response %s", r) missing++ } } if missing > 0 { glog.Warningf("Missing %d responses out of %d", missing, queries) } // Report any nodes that did not respond. for n, node := range nodes.Items { for i := 0; i < *podsPerNode; i++ { name := fmt.Sprintf("serve-hostname-%d-%d", n, i) if _, ok := responses[name]; !ok { glog.Warningf("No response from pod %s on node %s at iteration %d", name, node.Name, iteration) } } } glog.Infof("Iteration %d took %v for %d queries (%.2f QPS) with %d missing", iteration, time.Since(start), queries-missing, float64(queries-missing)/time.Since(start).Seconds(), missing) } }
// Run runs the specified ProxyServer. This should never exit.
func (s *ProxyServer) Run(_ []string) error {
	// TODO(vmarmol): Use container config for this.
	if err := util.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		glog.V(2).Info(err)
	}

	// Run in its own container.
	if err := util.RunInResourceContainer(s.ResourceContainer); err != nil {
		glog.Warningf("Failed to start in resource-only container %q: %v", s.ResourceContainer, err)
	} else {
		glog.V(2).Infof("Running in resource-only container %q", s.ResourceContainer)
	}

	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()

	protocol := iptables.ProtocolIpv4
	if net.IP(s.BindAddress).To4() == nil {
		protocol = iptables.ProtocolIpv6
	}
	loadBalancer := proxy.NewLoadBalancerRR()
	proxier, err := proxy.NewProxier(loadBalancer, net.IP(s.BindAddress), iptables.New(exec.New(), protocol), s.PortRange)
	if err != nil {
		glog.Fatalf("Unable to create proxier: %v", err)
	}

	// Wire proxier to handle changes to services
	serviceConfig.RegisterHandler(proxier)
	// And wire loadBalancer to handle changes to endpoints to services
	endpointsConfig.RegisterHandler(loadBalancer)

	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
	// only notify on changes, and the initial update (on process start) may be lost if no handlers
	// are registered yet.

	// define api config source
	if s.Qingconfig == "" && s.Master == "" {
		glog.Warningf("Neither --qingconfig nor --master was specified. Using default API client. This might not work.")
	}

	// This creates a client, first loading any specified qingconfig
	// file, and then overriding the Master flag, if non-empty.
	qingconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Qingconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}

	client, err := client.New(qingconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	config.NewSourceAPI(
		client.Services(api.NamespaceAll),
		client.Endpoints(api.NamespaceAll),
		30*time.Second,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"),
	)

	if s.HealthzPort > 0 {
		go util.Forever(func() {
			// Use net.JoinHostPort so an IPv6 bind address is bracketed correctly.
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}

	// Just loop forever for now...
	proxier.SyncLoop()
	return nil
}
// Run runs the specified APIServer. This should never exit.
func (s *APIServer) Run(_ []string) error {
	s.verifyClusterIPFlags()

	// If advertise-address is not specified, use bind-address. If bind-address
	// is also unset (or 0.0.0.0), setDefaults() in pkg/master/master.go will
	// do the right thing and use the host's default interface.
	if s.AdvertiseAddress == nil || net.IP(s.AdvertiseAddress).IsUnspecified() {
		s.AdvertiseAddress = s.BindAddress
	}

	if (s.EtcdConfigFile != "" && len(s.EtcdServerList) != 0) || (s.EtcdConfigFile == "" && len(s.EtcdServerList) == 0) {
		glog.Fatalf("specify either --etcd-servers or --etcd-config")
	}

	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: s.AllowPrivileged,
		// TODO(vmarmol): Implement support for HostNetworkSources.
		HostNetworkSources: []string{},
	})

	cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)

	qingletClient, err := client.NewQingletClient(&s.QingletConfig)
	if err != nil {
		glog.Fatalf("Failed to start qinglet client: %v", err)
	}

	// "api/all=false" allows users to selectively enable specific api versions.
	disableAllAPIs := false
	allAPIFlagValue, ok := s.RuntimeConfig["api/all"]
	if ok && allAPIFlagValue == "false" {
		disableAllAPIs = true
	}

	// "api/legacy=false" allows users to disable legacy api versions.
	// Right now, v1beta1 and v1beta2 are considered legacy.
	disableLegacyAPIs := false
	legacyAPIFlagValue, ok := s.RuntimeConfig["api/legacy"]
	if ok && legacyAPIFlagValue == "false" {
		disableLegacyAPIs = true
	}
	_ = disableLegacyAPIs // hush the compiler while we don't have legacy APIs to disable.

	// "api/v1beta3={true|false}" allows users to enable/disable v1beta3 API.
	// This takes preference over api/all and api/legacy, if specified.
	disableV1beta3 := disableAllAPIs
	disableV1beta3 = !s.getRuntimeConfigValue("api/v1beta3", !disableV1beta3)

	// "api/v1={true|false}" allows users to enable/disable v1 API.
	// This takes preference over api/all and api/legacy, if specified.
	disableV1 := disableAllAPIs
	disableV1 = !s.getRuntimeConfigValue("api/v1", !disableV1)

	// TODO: expose same flags as client.BindClientConfigFlags but for a server
	clientConfig := &client.Config{
		Host:    net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)),
		Version: s.StorageVersion,
	}
	client, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid server address: %v", err)
	}

	helper, err := newEtcd(s.EtcdConfigFile, s.EtcdServerList, s.StorageVersion, s.EtcdPathPrefix)
	if err != nil {
		glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err)
	}

	// TODO Is this the right place for migration to happen? Must *both* old and
	// new etcd prefix params be supplied for this to be valid?
	if s.OldEtcdPathPrefix != "" {
		if err = helper.MigrateKeys(s.OldEtcdPathPrefix); err != nil {
			glog.Fatalf("Migration of old etcd keys failed: %v", err)
		}
	}

	n := net.IPNet(s.ServiceClusterIPRange)

	// Default to the private server key for service account token signing
	if s.ServiceAccountKeyFile == "" && s.TLSPrivateKeyFile != "" {
		if apiserver.IsValidServiceAccountKeyFile(s.TLSPrivateKeyFile) {
			s.ServiceAccountKeyFile = s.TLSPrivateKeyFile
		} else {
			glog.Warning("no RSA key provided, service account token authentication disabled")
		}
	}
	authenticator, err := apiserver.NewAuthenticator(s.BasicAuthFile, s.ClientCAFile, s.TokenAuthFile, s.ServiceAccountKeyFile, s.ServiceAccountLookup, helper)
	if err != nil {
		glog.Fatalf("Invalid Authentication Config: %v", err)
	}

	authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(s.AuthorizationMode, s.AuthorizationPolicyFile)
	if err != nil {
		glog.Fatalf("Invalid Authorization Config: %v", err)
	}

	admissionControlPluginNames := strings.Split(s.AdmissionControl, ",")
	admissionController := admission.NewFromPlugins(client, admissionControlPluginNames, s.AdmissionControlConfigFile)

	if len(s.ExternalHost) == 0 {
		// TODO: extend for other providers
		if s.CloudProvider == "gce" {
			instances, supported := cloud.Instances()
			if !supported {
				glog.Fatalf("gce cloud provider has no instances. this shouldn't happen. exiting.")
			}
			name, err := os.Hostname()
			if err != nil {
				glog.Fatalf("failed to get hostname: %v", err)
			}
			addrs, err := instances.NodeAddresses(name)
			if err != nil {
				glog.Warningf("unable to obtain external host address from cloud provider: %v", err)
			} else {
				for _, addr := range addrs {
					if addr.Type == api.NodeExternalIP {
						s.ExternalHost = addr.Address
					}
				}
			}
		}
	}

	var installSSH master.InstallSSHKey
	if cloud != nil {
		if instances, supported := cloud.Instances(); supported {
			installSSH = instances.AddSSHKeyToAllInstances
		}
	}

	config := &master.Config{
		EtcdHelper:             helper,
		EventTTL:               s.EventTTL,
		QingletClient:          qingletClient,
		ServiceClusterIPRange:  &n,
		EnableCoreControllers:  true,
		EnableLogsSupport:      s.EnableLogsSupport,
		EnableUISupport:        true,
		EnableSwaggerSupport:   true,
		EnableProfiling:        s.EnableProfiling,
		EnableIndex:            true,
		APIPrefix:              s.APIPrefix,
		CorsAllowedOriginList:  s.CorsAllowedOriginList,
		ReadWritePort:          s.SecurePort,
		PublicAddress:          net.IP(s.AdvertiseAddress),
		Authenticator:          authenticator,
		SupportsBasicAuth:      len(s.BasicAuthFile) > 0,
		Authorizer:             authorizer,
		AdmissionControl:       admissionController,
		DisableV1Beta3:         disableV1beta3,
		DisableV1:              disableV1,
		MasterServiceNamespace: s.MasterServiceNamespace,
		ClusterName:            s.ClusterName,
		ExternalHost:           s.ExternalHost,
		MinRequestTimeout:      s.MinRequestTimeout,
		SSHUser:                s.SSHUser,
		SSHKeyfile:             s.SSHKeyfile,
		InstallSSHKey:          installSSH,
		ServiceNodePortRange:   s.ServiceNodePortRange,
	}
	m := master.New(config)

	// We serve on 2 ports. See docs/accessing_the_api.md
	secureLocation := ""
	if s.SecurePort != 0 {
		secureLocation = net.JoinHostPort(s.BindAddress.String(), strconv.Itoa(s.SecurePort))
	}
	insecureLocation := net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort))

	// See the flag commentary to understand our assumptions when opening the read-only and read-write ports.

	var sem chan bool
	if s.MaxRequestsInFlight > 0 {
		sem = make(chan bool, s.MaxRequestsInFlight)
	}

	longRunningRE := regexp.MustCompile(s.LongRunningRequestRE)

	if secureLocation != "" {
		secureServer := &http.Server{
			Addr:           secureLocation,
			Handler:        apiserver.MaxInFlightLimit(sem, longRunningRE, apiserver.RecoverPanics(m.Handler)),
			ReadTimeout:    ReadWriteTimeout,
			WriteTimeout:   ReadWriteTimeout,
			MaxHeaderBytes: 1 << 20,
			TLSConfig: &tls.Config{
				// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
				MinVersion: tls.VersionTLS10,
			},
		}

		if len(s.ClientCAFile) > 0 {
			clientCAs, err := util.CertPoolFromFile(s.ClientCAFile)
			if err != nil {
				glog.Fatalf("unable to load client CA file: %v", err)
			}
			// Populate PeerCertificates in requests, but don't reject connections without certificates
			// This allows certificates to be validated by authenticators, while still allowing other auth types
			secureServer.TLSConfig.ClientAuth = tls.RequestClientCert
			// Specify allowed CAs for client certificates
			secureServer.TLSConfig.ClientCAs = clientCAs
		}

		glog.Infof("Serving securely on %s", secureLocation)
		go func() {
			defer util.HandleCrash()
			for {
				if s.TLSCertFile == "" && s.TLSPrivateKeyFile == "" {
					s.TLSCertFile = path.Join(s.CertDirectory, "apiserver.crt")
					s.TLSPrivateKeyFile = path.Join(s.CertDirectory, "apiserver.key")
					// TODO (cjcullen): Is PublicAddress the right address to sign a cert with?
					if err := util.GenerateSelfSignedCert(config.PublicAddress.String(), s.TLSCertFile, s.TLSPrivateKeyFile); err != nil {
						glog.Errorf("Unable to generate self signed cert: %v", err)
					} else {
						glog.Infof("Using self-signed cert (%s, %s)", s.TLSCertFile, s.TLSPrivateKeyFile)
					}
				}
				// err == systemd.SdNotifyNoSocket when not running on a systemd system
				if err := systemd.SdNotify("READY=1\n"); err != nil && err != systemd.SdNotifyNoSocket {
					glog.Errorf("Unable to send systemd daemon successful start message: %v\n", err)
				}
				if err := secureServer.ListenAndServeTLS(s.TLSCertFile, s.TLSPrivateKeyFile); err != nil {
					glog.Errorf("Unable to listen for secure (%v); will try again.", err)
				}
				time.Sleep(15 * time.Second)
			}
		}()
	}

	// Named insecureServer rather than http to avoid shadowing the net/http package.
	insecureServer := &http.Server{
		Addr:           insecureLocation,
		Handler:        apiserver.RecoverPanics(m.InsecureHandler),
		ReadTimeout:    ReadWriteTimeout,
		WriteTimeout:   ReadWriteTimeout,
		MaxHeaderBytes: 1 << 20,
	}
	if secureLocation == "" {
		// err == systemd.SdNotifyNoSocket when not running on a systemd system
		if err := systemd.SdNotify("READY=1\n"); err != nil && err != systemd.SdNotifyNoSocket {
			glog.Errorf("Unable to send systemd daemon successful start message: %v\n", err)
		}
	}
	glog.Infof("Serving insecurely on %s", insecureLocation)
	glog.Fatal(insecureServer.ListenAndServe())
	return nil
}