// Run runs the CMServer. This should never exit.
func (s *CMServer) Run(_ []string) error {
	if s.Qingconfig == "" && s.Master == "" {
		glog.Warningf("Neither --qingconfig nor --master was specified. Using default API client. This might not work.")
	}

	// This creates a client, first loading any specified qingconfig
	// file, and then overriding the Master flag, if non-empty.
	qingconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Qingconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	qingconfig.QPS = 20.0
	qingconfig.Burst = 30

	qingClient, err := client.New(qingconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	// Serve /healthz, optional pprof endpoints, and Prometheus metrics.
	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	endpoints := service.NewEndpointController(qingClient)
	go endpoints.Run(s.ConcurrentEndpointSyncs, util.NeverStop)

	controllerManager := replicationControllerPkg.NewReplicationManager(qingClient, replicationControllerPkg.BurstReplicas)
	go controllerManager.Run(s.ConcurrentRCSyncs, util.NeverStop)

	cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)

	if s.SyncNodeStatus {
		glog.Warning("DEPRECATION NOTICE: sync-node-status flag is being deprecated. It has no effect now and it will be removed in a future version.")
	}

	nodeController := nodecontroller.NewNodeController(cloud, qingClient, s.RegisterRetryCount,
		s.PodEvictionTimeout, nodecontroller.NewPodEvictor(util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst)),
		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod,
		(*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
	nodeController.Run(s.NodeSyncPeriod)

	serviceController := servicecontroller.New(cloud, qingClient, s.ClusterName)
	if err := serviceController.Run(s.NodeSyncPeriod); err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	}

	if s.AllocateNodeCIDRs {
		routes, ok := cloud.Routes()
		if !ok {
			glog.Fatal("Cloud provider must support routes if allocate-node-cidrs is set")
		}
		routeController := routecontroller.New(routes, qingClient, s.ClusterName, (*net.IPNet)(&s.ClusterCIDR))
		routeController.Run(s.NodeSyncPeriod)
	}

	resourceQuotaManager := resourcequota.NewResourceQuotaManager(qingClient)
	resourceQuotaManager.Run(s.ResourceQuotaSyncPeriod)

	namespaceManager := namespace.NewNamespaceManager(qingClient, s.NamespaceSyncPeriod)
	namespaceManager.Run()

	pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(qingClient, s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()

	pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(qingClient, s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins())
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()

	// The token controller is only started when a service account signing key is provided.
	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccount.NewTokensController(
				qingClient,
				serviceaccount.DefaultTokenControllerOptions(
					serviceaccount.JWTTokenGenerator(privateKey),
				),
			).Run()
		}
	}

	serviceaccount.NewServiceAccountsController(
		qingClient,
		serviceaccount.DefaultServiceAccountsControllerOptions(),
	).Run()

	// Block forever; the controller manager only exits when the process is killed.
	select {}
}
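// For context, a minimal sketch of how a controller-manager binary might
// construct and start CMServer. The NewCMServer constructor, the AddFlags
// method, and the import path below are assumptions for illustration; the
// actual cmd/ entry point may differ.
//
//	package main
//
//	import (
//		"github.com/golang/glog"
//		"github.com/spf13/pflag"
//
//		"example.com/qingyuan/cmd/qing-controller-manager/app" // hypothetical import path
//	)
//
//	func main() {
//		s := app.NewCMServer()        // assumed constructor that fills in defaults
//		s.AddFlags(pflag.CommandLine) // assumed flag registration (--master, --qingconfig, ...)
//		pflag.Parse()
//
//		// Run blocks forever on select {}; it only returns if client setup fails.
//		if err := s.Run(pflag.Args()); err != nil {
//			glog.Fatalf("%v", err)
//		}
//	}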
// TestPersistentVolumeClaimBinder verifies that the claim binder binds
// available persistent volumes to matching claims and leaves unmatched
// claims unbound.
func TestPersistentVolumeClaimBinder(t *testing.T) {
	_, s := runAMaster(t)
	defer s.Close()
	deleteAllEtcdKeys()

	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})

	binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(client, 1*time.Second)
	binder.Run()
	defer binder.Stop()

	for _, volume := range createTestVolumes() {
		_, err := client.PersistentVolumes().Create(volume)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
	}

	volumes, err := client.PersistentVolumes().List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(volumes.Items) != 2 {
		t.Errorf("expected 2 PVs, got %#v", len(volumes.Items))
	}

	for _, claim := range createTestClaims() {
		_, err := client.PersistentVolumeClaims(api.NamespaceDefault).Create(claim)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
	}

	claims, err := client.PersistentVolumeClaims(api.NamespaceDefault).List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(claims.Items) != 3 {
		t.Errorf("expected 3 PVCs, got %#v", len(claims.Items))
	}

	// The binder will eventually catch up and set status on claims; watch until
	// the expected number of claims report a bound volume.
	watch, err := client.PersistentVolumeClaims(api.NamespaceDefault).Watch(labels.Everything(), fields.Everything(), "0")
	if err != nil {
		t.Fatalf("Couldn't subscribe to PersistentVolumeClaims: %v", err)
	}
	defer watch.Stop()

	boundCount := 0
	expectedBoundCount := 2
	for {
		event := <-watch.ResultChan()
		claim := event.Object.(*api.PersistentVolumeClaim)
		if claim.Spec.VolumeName != "" {
			boundCount++
		}
		if boundCount == expectedBoundCount {
			break
		}
	}

	for _, claim := range createTestClaims() {
		claim, err := client.PersistentVolumeClaims(api.NamespaceDefault).Get(claim.Name)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		if (claim.Name == "claim01" || claim.Name == "claim02") && claim.Spec.VolumeName == "" {
			t.Errorf("Expected claim to be bound: %+v", claim)
		}
		if claim.Name == "claim03" && claim.Spec.VolumeName != "" {
			t.Errorf("Expected claim03 to be unbound: %v", claim)
		}
	}
}
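// For reference, one possible shape for the createTestVolumes/createTestClaims
// helpers used above. The real helpers live elsewhere in this test file; the
// volume sources, capacities, and request sizes below are assumptions for
// illustration only.
//
//	func createTestVolumes() []*api.PersistentVolume {
//		return []*api.PersistentVolume{
//			{
//				ObjectMeta: api.ObjectMeta{Name: "pv01"},
//				Spec: api.PersistentVolumeSpec{
//					Capacity: api.ResourceList{
//						api.ResourceStorage: resource.MustParse("10G"),
//					},
//					PersistentVolumeSource: api.PersistentVolumeSource{
//						HostPath: &api.HostPathVolumeSource{Path: "/tmp/pv01"},
//					},
//				},
//			},
//			// ...a second volume, so the List check above sees 2 PVs
//		}
//	}
//
//	func createTestClaims() []*api.PersistentVolumeClaim {
//		return []*api.PersistentVolumeClaim{
//			{
//				ObjectMeta: api.ObjectMeta{Name: "claim01", Namespace: api.NamespaceDefault},
//				Spec: api.PersistentVolumeClaimSpec{
//					AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
//					Resources: api.ResourceRequirements{
//						Requests: api.ResourceList{
//							api.ResourceStorage: resource.MustParse("5G"),
//						},
//					},
//				},
//			},
//			// ...claim02 (satisfiable) and claim03 (too large to bind), so the
//			// List check above sees 3 PVCs and only two ever become bound
//		}
//	}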