func TestPollMinions(t *testing.T) { table := []struct { minions []api.Minion }{ { minions: []api.Minion{ {JSONBase: api.JSONBase{ID: "foo"}}, {JSONBase: api.JSONBase{ID: "bar"}}, }, }, } for _, item := range table { ml := &api.MinionList{Items: item.minions} handler := util.FakeHandler{ StatusCode: 200, ResponseBody: api.EncodeOrDie(ml), T: t, } server := httptest.NewServer(&handler) cf := ConfigFactory{client.New(server.URL, nil)} ce, err := cf.pollMinions() if err != nil { t.Errorf("Unexpected error: %v", err) continue } handler.ValidateRequest(t, "/api/v1beta1/minions", "GET", nil) if e, a := len(item.minions), ce.Len(); e != a { t.Errorf("Expected %v, got %v", e, a) } } }
func TestSyncEndpointsPodError(t *testing.T) { fakeHandler := util.FakeHandler{ StatusCode: 500, } testServer := httptest.NewTLSServer(&fakeHandler) client := client.New(testServer.URL, nil) serviceRegistry := MockServiceRegistry{ list: api.ServiceList{ Items: []api.Service{ { Selector: map[string]string{ "foo": "bar", }, }, }, }, } endpoints := MakeEndpointController(&serviceRegistry, client) err := endpoints.SyncServiceEndpoints() if err == nil { t.Error("Unexpected non-error") } }
func TestBind(t *testing.T) { table := []struct { binding *api.Binding }{ {binding: &api.Binding{PodID: "foo", Host: "foohost.kubernetes.mydomain.com"}}, } for _, item := range table { handler := util.FakeHandler{ StatusCode: 200, ResponseBody: "", T: t, } server := httptest.NewServer(&handler) b := binder{client.New(server.URL, nil)} err := b.Bind(item.binding) if err != nil { t.Errorf("Unexpected error: %v", err) continue } expectedBody := api.EncodeOrDie(item.binding) handler.ValidateRequest(t, "/api/v1beta1/bindings", "POST", &expectedBody) } }
// TestDefaultErrorFunc verifies that the handler produced by
// makeDefaultErrorFunc re-fetches the failed pod from the apiserver and
// requeues it into the FIFO under its ID.
func TestDefaultErrorFunc(t *testing.T) {
	testPod := &api.Pod{JSONBase: api.JSONBase{ID: "foo"}}
	handler := util.FakeHandler{
		StatusCode:   200,
		ResponseBody: api.EncodeOrDie(testPod),
		T:            t,
	}
	server := httptest.NewServer(&handler)
	factory := ConfigFactory{client.New(server.URL, nil)}
	queue := cache.NewFIFO()
	errFunc := factory.makeDefaultErrorFunc(queue)

	// Invoke the error handler; the re-fetch/requeue happens asynchronously,
	// hence the polling loop below.
	errFunc(testPod, nil)
	for {
		// This is a terrible way to do this but I plan on replacing this
		// whole error handling system in the future. The test will time
		// out if something doesn't work.
		time.Sleep(10 * time.Millisecond)
		got, exists := queue.Get("foo")
		if !exists {
			continue
		}
		handler.ValidateRequest(t, "/api/v1beta1/pods/foo", "GET", nil)
		if e, a := testPod, got; !reflect.DeepEqual(e, a) {
			t.Errorf("Expected %v, got %v", e, a)
		}
		break
	}
}
func TestReflector_startWatch(t *testing.T) { table := []struct{ resource, path string }{ {"pods", "/api/v1beta1/pods/watch"}, {"services", "/api/v1beta1/services/watch"}, } for _, testItem := range table { got := make(chan struct{}) srv := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, req *http.Request) { w.WriteHeader(http.StatusNotFound) if req.URL.Path == testItem.path { close(got) return } t.Errorf("unexpected path %v", req.URL.Path) })) s := NewStore() c := client.New(srv.URL, nil) g := NewReflector(testItem.resource, c, &api.Pod{}, s) _, err := g.startWatch() // We're just checking that it watches the right path. if err == nil { t.Errorf("unexpected non-error") } <-got } }
// TestHandleWatchResponseDelete verifies that handleWatchResponse decodes
// the replication controller out of an etcd "delete" event and returns it.
func TestHandleWatchResponseDelete(t *testing.T) {
	body, _ := json.Marshal(makePodList(2))
	fakeHandler := util.FakeHandler{
		StatusCode:   200,
		ResponseBody: string(body),
	}
	testServer := httptest.NewTLSServer(&fakeHandler)
	client := client.New(testServer.URL, nil)

	fakePodControl := FakePodControl{}
	manager := MakeReplicationManager(nil, client)
	manager.podControl = &fakePodControl
	controller := makeReplicationController(2)
	data, err := json.Marshal(controller)
	expectNoError(t, err)
	// For a delete event the payload lives in PrevNode, not Node.
	controllerOut, err := manager.handleWatchResponse(&etcd.Response{
		Action: "delete",
		PrevNode: &etcd.Node{
			Value: string(data),
		},
	})
	if err != nil {
		t.Errorf("Unexpected error: %#v", err)
	}
	if !reflect.DeepEqual(controller, *controllerOut) {
		t.Errorf("Unexpected mismatch. Expected %#v, Saw: %#v", controller, controllerOut)
	}
}
func loadClientOrDie() *client.Client { config := client.Config{ Host: *host, } info, err := clientauth.LoadFromFile(*authConfig) if err != nil { glog.Fatalf("Error loading auth: %v", err) } // If the certificate directory is provided, set the cert paths to be there. if *certDir != "" { glog.Infof("Expecting certs in %v.", *certDir) info.CAFile = filepath.Join(*certDir, "ca.crt") info.CertFile = filepath.Join(*certDir, "kubecfg.crt") info.KeyFile = filepath.Join(*certDir, "kubecfg.key") } config, err = info.MergeWithConfig(config) if err != nil { glog.Fatalf("Error creating client") } c, err := client.New(&config) if err != nil { glog.Fatalf("Error creating client") } return c }
func (c *config) getKubeClient() *kubeclient.Client { kubeClient, err := kubeclient.New("http://"+c.ListenAddr, nil) if err != nil { glog.Fatalf("Unable to configure client - bad URL: %v", err) } return kubeClient }
func GetClientForUser(clientConfig kclient.Config, username string) (*client.Client, *kclient.Client, *kclient.Config, error) { token, err := tokencmd.RequestToken(&clientConfig, nil, username, "password") if err != nil { return nil, nil, nil, err } userClientConfig := clientConfig userClientConfig.BearerToken = token userClientConfig.Username = "" userClientConfig.Password = "" userClientConfig.TLSClientConfig.CertFile = "" userClientConfig.TLSClientConfig.KeyFile = "" userClientConfig.TLSClientConfig.CertData = nil userClientConfig.TLSClientConfig.KeyData = nil kubeClient, err := kclient.New(&userClientConfig) if err != nil { return nil, nil, nil, err } osClient, err := client.New(&userClientConfig) if err != nil { return nil, nil, nil, err } return osClient, kubeClient, &userClientConfig, nil }
// TODO(jdef): hacked from kubelet/server/server.go
// TODO(k8s): replace this with clientcmd
// createAPIServerClient builds an apiserver client from the auth file at
// s.AuthPath (falling back to empty defaults if it cannot be loaded) and
// the first entry of s.APIServerList.
func (s *SchedulerServer) createAPIServerClient() (*client.Client, error) {
	authInfo, err := clientauth.LoadFromFile(s.AuthPath)
	if err != nil {
		log.Warningf("Could not load kubernetes auth path: %v. Continuing with defaults.", err)
	}
	if authInfo == nil {
		// authInfo didn't load correctly - continue with defaults.
		authInfo = &clientauth.Info{}
	}
	clientConfig, err := authInfo.MergeWithConfig(client.Config{})
	if err != nil {
		return nil, err
	}
	if len(s.APIServerList) < 1 {
		return nil, fmt.Errorf("no api servers specified")
	}
	// TODO: adapt Kube client to support LB over several servers
	if len(s.APIServerList) > 1 {
		log.Infof("Multiple api servers specified. Picking first one")
	}
	clientConfig.Host = s.APIServerList[0]
	c, err := client.New(&clientConfig)
	if err != nil {
		return nil, err
	}
	return c, nil
}
// TODO: evaluate using pkg/client/clientcmd
// newKubeClient builds an API client either directly from the
// --kube_master_url flag (API version v1) or from a kubecfg file, with
// the master URL acting as an override when both are given.
func newKubeClient() (*kclient.Client, error) {
	var (
		config    *kclient.Config
		err       error
		masterURL string
	)
	if *argKubeMasterURL != "" {
		masterURL, err = getKubeMasterURL()
		if err != nil {
			return nil, err
		}
	}
	if *argKubecfgFile == "" {
		// Without a kubecfg file the master URL is mandatory.
		if masterURL == "" {
			return nil, fmt.Errorf("--kube_master_url must be set when --kubecfg_file is not set")
		}
		config = &kclient.Config{
			Host:    masterURL,
			Version: "v1",
		}
	} else {
		overrides := &kclientcmd.ConfigOverrides{}
		if masterURL != "" {
			// Explicit command-line master wins over the kubecfg's server.
			overrides.ClusterInfo.Server = masterURL
		}
		if config, err = kclientcmd.NewNonInteractiveDeferredLoadingClientConfig(
			&kclientcmd.ClientConfigLoadingRules{ExplicitPath: *argKubecfgFile},
			overrides).ClientConfig(); err != nil {
			return nil, err
		}
	}
	glog.Infof("Using %s for kubernetes master", config.Host)
	glog.Infof("Using kubernetes API %s", config.Version)
	return kclient.New(config)
}
func k8sClientFactory() *k8sClient { if len(*addr) > 0 && len(*user) > 0 && len(*pword) > 0 { config := client.Config{ Host: *addr, Username: *user, Password: *pword, Insecure: true, } return &k8sClient{client.NewOrDie(&config)} } else { kubernetesService := os.Getenv("KUBERNETES_SERVICE_HOST") if kubernetesService == "" { glog.Fatalf("Please specify the Kubernetes server with --server") } apiServer := fmt.Sprintf("https://%s:%s", kubernetesService, os.Getenv("KUBERNETES_SERVICE_PORT")) token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token") if err != nil { glog.Fatalf("No service account token found") } config := client.Config{ Host: apiServer, BearerToken: string(token), Insecure: true, } c, err := client.New(&config) if err != nil { glog.Fatalf("Failed to make client: %v", err) } return &k8sClient{c} } }
// main starts the scheduler: it requires HADOOP_CONF_DIR, connects to the
// apiserver, starts event recording and an HTTP endpoint on the default
// mux, then runs the scheduling loop forever.
func main() {
	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if hadoopConfDir == nil || *hadoopConfDir == "" {
		glog.Fatalf("HADOOP_CONF_DIR not set!")
	}
	os.Setenv("HADOOP_CONF_DIR", *hadoopConfDir)

	kubeClient, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}
	record.StartRecording(kubeClient.Events(""), api.EventSource{Component: "scheduler"})

	// Serve whatever handlers are registered on the default mux.
	go http.ListenAndServe(net.JoinHostPort(address.String(), strconv.Itoa(*port)), nil)

	configFactory := factory.NewConfigFactory(kubeClient)
	config, err := createConfig(configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	s := scheduler.New(config)
	s.Run()

	// Block forever; the scheduler runs in background goroutines.
	select {}
}
// TestSyncEndpointsItems verifies that a full endpoint sync for one
// service selecting one pod produces exactly one endpoints entry in the
// mock registry.
func TestSyncEndpointsItems(t *testing.T) {
	body, _ := json.Marshal(makePodList(1))
	fakeHandler := util.FakeHandler{
		StatusCode:   200,
		ResponseBody: string(body),
	}
	testServer := httptest.NewTLSServer(&fakeHandler)
	client := client.New(testServer.URL, nil)
	serviceRegistry := MockServiceRegistry{
		list: api.ServiceList{
			Items: []api.Service{
				{
					Selector: map[string]string{
						"foo": "bar",
					},
				},
			},
		},
	}
	endpoints := MakeEndpointController(&serviceRegistry, client)
	if err := endpoints.SyncServiceEndpoints(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(serviceRegistry.endpoints.Endpoints) != 1 {
		t.Errorf("Unexpected endpoints update: %#v", serviceRegistry.endpoints)
	}
}
func main() { flag.Parse() util.InitLogs() defer util.FlushLogs() verflag.PrintAndExitIfRequested() kubeClient, err := client.New(clientConfig) if err != nil { glog.Fatalf("Invalid API configuration: %v", err) } record.StartRecording(kubeClient.Events(""), api.EventSource{Component: "scheduler"}) go http.ListenAndServe(net.JoinHostPort(address.String(), strconv.Itoa(*port)), nil) configFactory := factory.NewConfigFactory(kubeClient) config, err := configFactory.Create() if err != nil { glog.Fatalf("Failed to create scheduler configuration: %v", err) } s := scheduler.New(config) s.Run() select {} }
// Clients returns an OpenShift and Kubernetes client with the credentials of the named service account // TODO: change return types to client.Interface/kclient.Interface to allow auto-reloading credentials func Clients(config kclient.Config, tokenRetriever TokenRetriever, namespace, name string) (*client.Client, *kclient.Client, error) { // Clear existing auth info config.Username = "" config.Password = "" config.CertFile = "" config.CertData = []byte{} config.KeyFile = "" config.KeyData = []byte{} // For now, just initialize the token once // TODO: refetch the token if the client encounters 401 errors token, err := tokenRetriever.GetToken(namespace, name) if err != nil { return nil, nil, err } config.BearerToken = token c, err := client.New(&config) if err != nil { return nil, nil, err } kc, err := kclient.New(&config) if err != nil { return nil, nil, err } return c, kc, nil }
// Run runs the specified SchedulerServer. This should never exit.
func (s *SchedulerServer) Run(_ []string) error {
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified.  Using default API client.  This might not work.")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	// Raise the client rate limits above the defaults for scheduler traffic.
	kubeconfig.QPS = 20.0
	kubeconfig.Burst = 30

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	// Serve healthz, optional pprof, and Prometheus metrics on a side mux.
	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())
		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	configFactory := factory.NewConfigFactory(kubeClient)
	config, err := s.createConfig(configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}
	config.Cloud, err = cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	// Route scheduler events both to the logs and back to the apiserver.
	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	sched := scheduler.New(config)
	sched.Run()

	// Block forever; the scheduler runs in background goroutines.
	select {}
}
// main starts an apiserver backed by etcd: it validates minion flags,
// initializes capabilities and the cloud provider, builds the master,
// installs the v1beta1 API (with optional CORS), and serves until fatal.
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()
	verifyMinionFlags()

	if len(etcdServerList) == 0 {
		glog.Fatalf("-etcd_servers flag is required.")
	}

	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: *allowPrivileged,
	})

	cloud := initCloudProvider(*cloudProvider, *cloudConfigFile)

	podInfoGetter := &client.HTTPPodInfoGetter{
		Client: http.DefaultClient,
		Port:   *minionPort,
	}

	client, err := client.New(net.JoinHostPort(*address, strconv.Itoa(int(*port))), nil)
	if err != nil {
		glog.Fatalf("Invalid server address: %v", err)
	}

	m := master.New(&master.Config{
		Client:             client,
		Cloud:              cloud,
		EtcdServers:        etcdServerList,
		HealthCheckMinions: *healthCheckMinions,
		Minions:            machineList,
		MinionCacheTTL:     *minionCacheTTL,
		MinionRegexp:       *minionRegexp,
		PodInfoGetter:      podInfoGetter,
	})

	storage, codec := m.API_v1beta1()
	handler := apiserver.Handle(storage, codec, *apiPrefix)
	// Wrap the API handler with CORS only when origins were configured.
	if len(corsAllowedOriginList) > 0 {
		allowedOriginRegexps, err := util.CompileRegexps(corsAllowedOriginList)
		if err != nil {
			glog.Fatalf("Invalid CORS allowed origin, --cors_allowed_origins flag was set to %v - %v", strings.Join(corsAllowedOriginList, ","), err)
		}
		handler = apiserver.CORS(handler, allowedOriginRegexps, nil, nil, "true")
	}
	s := &http.Server{
		Addr:           net.JoinHostPort(*address, strconv.Itoa(int(*port))),
		Handler:        apiserver.RecoverPanics(handler),
		ReadTimeout:    5 * time.Minute,
		WriteTimeout:   5 * time.Minute,
		MaxHeaderBytes: 1 << 20,
	}
	glog.Fatal(s.ListenAndServe())
}
func main() { flag.Parse() glog.Info("Elasticsearch discovery") apiServer := *server if apiServer == "" { kubernetesService := os.Getenv("KUBERNETES_SERVICE_HOST") if kubernetesService == "" { glog.Fatalf("Please specify the Kubernetes server with --server") } apiServer = fmt.Sprintf("https://%s:%s", kubernetesService, os.Getenv("KUBERNETES_SERVICE_PORT")) } glog.Infof("Server: %s", apiServer) glog.Infof("Namespace: %q", *namespace) glog.Infof("selector: %q", *selector) config := client.Config{ Host: apiServer, BearerToken: *token, Insecure: true, } c, err := client.New(&config) if err != nil { glog.Fatalf("Failed to make client: %v", err) } l, err := labels.Parse(*selector) if err != nil { glog.Fatalf("Failed to parse selector %q: %v", *selector, err) } pods, err := c.Pods(*namespace).List(l, fields.Everything()) if err != nil { glog.Fatalf("Failed to list pods: %v", err) } glog.Infof("Elasticsearch pods in namespace %s with selector %q", *namespace, *selector) podIPs := []string{} for i := range pods.Items { p := &pods.Items[i] for attempt := 0; attempt < 10; attempt++ { glog.Infof("%d: %s PodIP: %s", i, p.Name, p.Status.PodIP) if p.Status.PodIP != "" { podIPs = append(podIPs, fmt.Sprintf(`"%s"`, p.Status.PodIP)) break } time.Sleep(1 * time.Second) p, err = c.Pods(*namespace).Get(p.Name) if err != nil { glog.Warningf("Failed to get pod %s: %v", p.Name, err) } } if p.Status.PodIP == "" { glog.Warningf("Failed to obtain PodIP for %s", p.Name) } } fmt.Printf("discovery.zen.ping.unicast.hosts: [%s]\n", strings.Join(podIPs, ", ")) }
// CloudCfg command line tool. func main() { flag.Usage = func() { usage() } flag.Parse() // Scan the arguments list util.InitLogs() defer util.FlushLogs() if *versionFlag { fmt.Println("Version:", AppVersion) os.Exit(0) } secure := true var masterServer string if len(*httpServer) > 0 { masterServer = *httpServer } else if len(os.Getenv("KUBERNETES_MASTER")) > 0 { masterServer = os.Getenv("KUBERNETES_MASTER") } else { masterServer = "http://localhost:8080" } parsedURL, err := url.Parse(masterServer) if err != nil { glog.Fatalf("Unable to parse %v as a URL\n", err) } if parsedURL.Scheme != "" && parsedURL.Scheme != "https" { secure = false } var auth *kube_client.AuthInfo if secure { auth, err = kubecfg.LoadAuthInfo(*authConfig) if err != nil { glog.Fatalf("Error loading auth: %v", err) } } if *proxy { glog.Info("Starting to serve on localhost:8001") server := kubecfg.NewProxyServer(*www, masterServer, auth) glog.Fatal(server.Serve()) } if len(flag.Args()) < 1 { usage() os.Exit(1) } method := flag.Arg(0) client := kube_client.New(masterServer, auth) matchFound := executeAPIRequest(method, client) || executeControllerRequest(method, client) if matchFound == false { glog.Fatalf("Unknown command %s", method) } }
// startComponents starts an in-process cluster for integration testing:
// an apiserver backed by local etcd, a scheduler, a replication
// controller manager, and two kubelets (one per entry in machineList).
// It returns the apiserver's URL.
func startComponents(manifestURL string) (apiServerURL string) {
	// Setup
	servers := []string{"http://localhost:4001"}
	glog.Infof("Creating etcd client pointing to %v", servers)
	machineList := []string{"localhost", "machine"}

	// The apiserver handler is installed into the delegate later, after the
	// master is built; delegateHandler lets the server exist first.
	handler := delegateHandler{}
	apiServer := httptest.NewServer(&handler)

	etcdClient := etcd.NewClient(servers)

	cl := client.New(apiServer.URL, nil)
	cl.PollPeriod = time.Second * 1
	cl.Sync = true

	// Master
	m := master.New(&master.Config{
		Client:        cl,
		EtcdServers:   servers,
		Minions:       machineList,
		PodInfoGetter: fakePodInfoGetter{},
	})
	storage, codec := m.API_v1beta1()
	handler.delegate = apiserver.Handle(storage, codec, "/api/v1beta1")

	// Scheduler
	scheduler.New((&factory.ConfigFactory{cl}).Create()).Run()

	controllerManager := controller.NewReplicationManager(cl)

	// Prove that controllerManager's watch works by making it not sync until after this
	// test is over. (Hopefully we don't take 10 minutes!)
	controllerManager.Run(10 * time.Minute)

	// Kubelet (localhost)
	cfg1 := config.NewPodConfig(config.PodConfigNotificationSnapshotAndUpdates)
	config.NewSourceEtcd(config.EtcdKeyForHost(machineList[0]), etcdClient, cfg1.Channel("etcd"))
	config.NewSourceURL(manifestURL, 5*time.Second, cfg1.Channel("url"))
	myKubelet := kubelet.NewIntegrationTestKubelet(machineList[0], &fakeDocker1)
	go util.Forever(func() { myKubelet.Run(cfg1.Updates()) }, 0)
	go util.Forever(func() {
		kubelet.ListenAndServeKubeletServer(myKubelet, cfg1.Channel("http"), "localhost", 10250)
	}, 0)

	// Kubelet (machine)
	// Create a second kubelet so that the guestbook example's two redis slaves both
	// have a place they can schedule.
	cfg2 := config.NewPodConfig(config.PodConfigNotificationSnapshotAndUpdates)
	config.NewSourceEtcd(config.EtcdKeyForHost(machineList[1]), etcdClient, cfg2.Channel("etcd"))
	otherKubelet := kubelet.NewIntegrationTestKubelet(machineList[1], &fakeDocker2)
	go util.Forever(func() { otherKubelet.Run(cfg2.Updates()) }, 0)
	go util.Forever(func() {
		kubelet.ListenAndServeKubeletServer(otherKubelet, cfg2.Channel("http"), "localhost", 10251)
	}, 0)

	return apiServer.URL
}
func NewKubernetesExecutor(url string, username string, password string) (Executor, error) { config := client.Config{Host: url, Username: username, Password: password} client, err := client.New(&config) if err != nil { return KubernetesExecutor{}, err } return KubernetesExecutor{client: client}, nil }
// Run runs the specified ProxyServer. This should never exit.
func (s *ProxyServer) Run(_ []string) error {
	// remove iptables rules and exit if the OOM score cannot be adjusted;
	// failure here is logged but not fatal.
	// NOTE(review): ApplyOomScoreAdj failure is only logged — confirm that
	// continuing without the adjustment is intended.
	if err := util.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		glog.Info(err)
	}

	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()

	// Pick the iptables protocol family from the bind address.
	protocol := iptables.ProtocolIpv4
	if net.IP(s.BindAddress).To4() == nil {
		protocol = iptables.ProtocolIpv6
	}
	loadBalancer := proxy.NewLoadBalancerRR()
	proxier := proxy.NewProxier(loadBalancer, net.IP(s.BindAddress), iptables.New(exec.New(), protocol))
	if proxier == nil {
		glog.Fatalf("failed to create proxier, aborting")
	}

	// Wire proxier to handle changes to services
	serviceConfig.RegisterHandler(proxier)
	// And wire loadBalancer to handle changes to endpoints to services
	endpointsConfig.RegisterHandler(loadBalancer)

	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
	// only notify on changes, and the initial update (on process start) may be lost if no handlers
	// are registered yet.

	// define api config source
	if s.ClientConfig.Host != "" {
		glog.Infof("Using API calls to get config %v", s.ClientConfig.Host)
		client, err := client.New(&s.ClientConfig)
		if err != nil {
			glog.Fatalf("Invalid API configuration: %v", err)
		}
		config.NewSourceAPI(
			client.Services(api.NamespaceAll),
			client.Endpoints(api.NamespaceAll),
			30*time.Second,
			serviceConfig.Channel("api"),
			endpointsConfig.Channel("api"),
		)
	}

	// Optionally expose a health endpoint, restarting the listener forever
	// if it fails.
	if s.HealthzPort > 0 {
		go util.Forever(func() {
			err := http.ListenAndServe(s.BindAddress.String()+":"+strconv.Itoa(s.HealthzPort), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}

	// Just loop forever for now...
	proxier.SyncLoop()
	return nil
}
// TestCreateReplica verifies that createReplica POSTs a pod built from the
// controller's pod template — labels and desired state copied over, with
// Kind/APIVersion filled in — to the /pods endpoint.
func TestCreateReplica(t *testing.T) {
	// Encode error deliberately ignored: the fake handler only needs some
	// 200 body.
	body, _ := api.Encode(api.Pod{})
	fakeHandler := util.FakeHandler{
		StatusCode:   200,
		ResponseBody: string(body),
	}
	testServer := httptest.NewTLSServer(&fakeHandler)
	client := client.New(testServer.URL, nil)
	podControl := RealPodControl{
		kubeClient: client,
	}

	controllerSpec := api.ReplicationController{
		JSONBase: api.JSONBase{
			Kind: "ReplicationController",
		},
		DesiredState: api.ReplicationControllerState{
			PodTemplate: api.PodTemplate{
				DesiredState: api.PodState{
					Manifest: api.ContainerManifest{
						Containers: []api.Container{
							{
								Image: "foo/bar",
							},
						},
					},
				},
				Labels: map[string]string{
					"name": "foo",
					"type": "production",
				},
			},
		},
	}

	podControl.createReplica(controllerSpec)

	// The created pod must carry the template's labels and desired state.
	expectedPod := api.Pod{
		JSONBase: api.JSONBase{
			Kind:       "Pod",
			APIVersion: "v1beta1",
		},
		Labels:       controllerSpec.DesiredState.PodTemplate.Labels,
		DesiredState: controllerSpec.DesiredState.PodTemplate.DesiredState,
	}
	fakeHandler.ValidateRequest(t, makeURL("/pods"), "POST", nil)
	actualPod := api.Pod{}
	if err := json.Unmarshal([]byte(fakeHandler.RequestBody), &actualPod); err != nil {
		t.Errorf("Unexpected error: %#v", err)
	}
	if !reflect.DeepEqual(expectedPod, actualPod) {
		t.Logf("Body: %s", fakeHandler.RequestBody)
		t.Errorf("Unexpected mismatch.  Expected\n %#v,\n Got:\n %#v", expectedPod, actualPod)
	}
}
func NewProxyServer(filebase, host string, auth *client.AuthInfo) *ProxyServer { server := &ProxyServer{ Host: host, Auth: auth, Client: client.New(host, auth), } http.Handle("/api/", server) http.Handle("/static/", makeFileHandler("/static/", filebase)) return server }
func TestCreate(t *testing.T) { handler := util.FakeHandler{ StatusCode: 500, ResponseBody: "", T: t, } server := httptest.NewServer(&handler) factory := ConfigFactory{client.New(server.URL, nil)} factory.Create() }
// main starts a master: etcd-backed when -etcd_servers is given,
// otherwise an in-memory master. Requires a machine list; GCE is the only
// recognized cloud provider here.
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	if len(machineList) == 0 {
		glog.Fatal("No machines specified!")
	}

	var cloud cloudprovider.Interface
	switch *cloudProvider {
	case "gce":
		var err error
		cloud, err = cloudprovider.NewGCECloud()
		if err != nil {
			glog.Fatalf("Couldn't connect to GCE cloud: %#v", err)
		}
	default:
		// An unknown provider is logged but not fatal; cloud stays nil.
		if len(*cloudProvider) > 0 {
			glog.Infof("Unknown cloud provider: %s", *cloudProvider)
		} else {
			glog.Info("No cloud provider specified.")
		}
	}

	podInfoGetter := &client.HTTPPodInfoGetter{
		Client: http.DefaultClient,
		Port:   *minionPort,
	}

	client := client.New("http://"+net.JoinHostPort(*address, strconv.Itoa(int(*port))), nil)

	var m *master.Master
	if len(etcdServerList) > 0 {
		m = master.New(&master.Config{
			Client:             client,
			Cloud:              cloud,
			EtcdServers:        etcdServerList,
			HealthCheckMinions: *healthCheckMinions,
			Minions:            machineList,
			MinionCacheTTL:     *minionCacheTTL,
			MinionRegexp:       *minionRegexp,
			PodInfoGetter:      podInfoGetter,
		})
	} else {
		// No etcd: fall back to an in-memory master.
		m = master.NewMemoryServer(&master.Config{
			Client:        client,
			Cloud:         cloud,
			Minions:       machineList,
			PodInfoGetter: podInfoGetter,
		})
	}

	glog.Fatal(m.Run(net.JoinHostPort(*address, strconv.Itoa(int(*port))), *apiPrefix))
}
// main starts an etcd-backed apiserver: it resolves the cloud provider,
// builds the master, installs the v1beta1 API, and serves until fatal.
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()
	verifyMinionFlags()

	if len(etcdServerList) == 0 {
		glog.Fatalf("-etcd_servers flag is required.")
	}

	cloud, err := cloudprovider.GetCloudProvider(*cloudProvider)
	if err != nil {
		glog.Fatalf("Couldn't init cloud provider %q: %#v", *cloudProvider, err)
	}
	if cloud == nil {
		// A named-but-unknown provider is fatal; no provider at all is fine.
		if len(*cloudProvider) > 0 {
			glog.Fatalf("Unknown cloud provider: %s", *cloudProvider)
		} else {
			glog.Info("No cloud provider specified.")
		}
	}

	podInfoGetter := &client.HTTPPodInfoGetter{
		Client: http.DefaultClient,
		Port:   *minionPort,
	}

	client, err := client.New(net.JoinHostPort(*address, strconv.Itoa(int(*port))), nil)
	if err != nil {
		glog.Fatalf("Invalid server address: %v", err)
	}

	m := master.New(&master.Config{
		Client:             client,
		Cloud:              cloud,
		EtcdServers:        etcdServerList,
		HealthCheckMinions: *healthCheckMinions,
		Minions:            machineList,
		MinionCacheTTL:     *minionCacheTTL,
		MinionRegexp:       *minionRegexp,
		PodInfoGetter:      podInfoGetter,
	})

	storage, codec := m.API_v1beta1()
	s := &http.Server{
		Addr:           net.JoinHostPort(*address, strconv.Itoa(int(*port))),
		Handler:        apiserver.Handle(storage, codec, *apiPrefix),
		ReadTimeout:    5 * time.Minute,
		WriteTimeout:   5 * time.Minute,
		MaxHeaderBytes: 1 << 20,
	}
	glog.Fatal(s.ListenAndServe())
}
// main runs the service proxy: service/endpoint configuration is taken
// from the API server (-master), etcd (-etcd_servers, only when no
// master is set), and a local config file, then fed to a round-robin
// proxier that loops forever.
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()

	// define api config source
	if *master != "" {
		glog.Infof("Using api calls to get config %v", *master)
		//TODO: add auth info
		client, err := client.New(*master, nil)
		if err != nil {
			glog.Fatalf("Invalid -master: %v", err)
		}
		config.NewSourceAPI(
			client,
			30*time.Second,
			serviceConfig.Channel("api"),
			endpointsConfig.Channel("api"),
		)
	}

	// Create a configuration source that handles configuration from etcd.
	if len(etcdServerList) > 0 && *master == "" {
		glog.Infof("Using etcd servers %v", etcdServerList)

		// Set up logger for etcd client
		etcd.SetLogger(util.NewLogger("etcd "))
		etcdClient := etcd.NewClient(etcdServerList)
		config.NewConfigSourceEtcd(etcdClient,
			serviceConfig.Channel("etcd"),
			endpointsConfig.Channel("etcd"))
	}

	// And create a configuration source that reads from a local file
	config.NewConfigSourceFile(*configFile,
		serviceConfig.Channel("file"),
		endpointsConfig.Channel("file"))
	glog.Infof("Using configuration file %s", *configFile)

	loadBalancer := proxy.NewLoadBalancerRR()
	proxier := proxy.NewProxier(loadBalancer)
	// Wire proxier to handle changes to services
	serviceConfig.RegisterHandler(proxier)
	// And wire loadBalancer to handle changes to endpoints to services
	endpointsConfig.RegisterHandler(loadBalancer)

	// Just loop forever for now...
	select {}
}
func loadClient() (*client.Client, error) { config, err := loadConfig() if err != nil { return nil, fmt.Errorf("error creating client: %v", err.Error()) } c, err := client.New(config) if err != nil { return nil, fmt.Errorf("error creating client: %v", err.Error()) } return c, nil }