Example #1
File: github.go Project: jdef/contrib
func MakeClient(token string) *github.Client {
	var client *http.Client
	var transport http.RoundTripper
	if *useMemoryCache {
		transport = httpcache.NewMemoryCacheTransport()
	} else {
		transport = http.DefaultTransport
	}
	if len(token) > 0 {
		rateLimitTransport := &RateLimitRoundTripper{
			delegate: transport,
			// Global limit is 5000 Q/Hour, try to only use 1800 to make room for other apps
			throttle: util.NewTokenBucketRateLimiter(0.5, 10),
		}
		ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
		client = &http.Client{
			Transport: &oauth2.Transport{
				Base:   rateLimitTransport,
				Source: oauth2.ReuseTokenSource(nil, ts),
			},
		}
	} else {
		rateLimitTransport := &RateLimitRoundTripper{
			delegate: transport,
			throttle: util.NewTokenBucketRateLimiter(0.01, 10),
		}
		client = &http.Client{
			Transport: rateLimitTransport,
		}
	}
	return github.NewClient(client)
}
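
Note: the RateLimitRoundTripper type used above is defined elsewhere in the project. A minimal sketch of the pattern its fields imply, assuming util.RateLimiter's blocking Accept() (sketch only, not the project's actual implementation):

// Sketch: an http.RoundTripper that waits on a token bucket before delegating.
type RateLimitRoundTripper struct {
	delegate http.RoundTripper
	throttle util.RateLimiter
}

func (r *RateLimitRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	r.throttle.Accept() // block until the token bucket releases a token
	return r.delegate.RoundTrip(req)
}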
Example #2
func (config *GithubConfig) PreExecute() error {
	if len(config.Org) == 0 {
		glog.Fatalf("--organization is required.")
	}
	if len(config.Project) == 0 {
		glog.Fatalf("--project is required.")
	}

	token := config.Token
	if len(token) == 0 && len(config.TokenFile) != 0 {
		data, err := ioutil.ReadFile(config.TokenFile)
		if err != nil {
			glog.Fatalf("error reading token file: %v", err)
		}
		token = string(data)
	}

	var client *http.Client
	var transport http.RoundTripper
	if config.useMemoryCache {
		transport = httpcache.NewMemoryCacheTransport()
	} else {
		transport = http.DefaultTransport
	}
	if len(token) > 0 {
		rateLimitTransport := &RateLimitRoundTripper{
			delegate: transport,
			// Global limit is 5000 Q/Hour, try to only use 1800 to make room for other apps
			throttle: util.NewTokenBucketRateLimiter(0.5, 10),
		}
		ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
		client = &http.Client{
			Transport: &oauth2.Transport{
				Base:   rateLimitTransport,
				Source: oauth2.ReuseTokenSource(nil, ts),
			},
		}
	} else {
		rateLimitTransport := &RateLimitRoundTripper{
			delegate: transport,
			throttle: util.NewTokenBucketRateLimiter(0.01, 10),
		}
		client = &http.Client{
			Transport: rateLimitTransport,
		}
	}
	config.client = github.NewClient(client)
	return nil
}
Example #3
func main() {
	var ingClient client.IngressInterface
	if kubeClient, err := client.NewInCluster(); err != nil {
		log.Fatalf("Failed to create client: %v.", err)
	} else {
		ingClient = kubeClient.Extensions().Ingress(os.Getenv("INGRESS_NAMESPACE"))
	}
	tmpl, _ := template.New("haproxy").Parse(haproxyConf)
	rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1)
	known := &extensions.IngressList{}

	// Controller loop
	shellOut("haproxy -f /etc/haproxy/haproxy.cfg -p /var/run/haproxy-private.pid")
	for {
		rateLimiter.Accept()
		ingresses, err := ingClient.List(api.ListOptions{})
		if err != nil {
			log.Printf("Error retrieving ingresses: %v", err)
			continue
		}
		if reflect.DeepEqual(ingresses.Items, known.Items) {
			log.Printf("Nothing Has Changed")
			continue
		}
		known = ingresses
		if w, err := os.Create("/etc/haproxy/haproxy.cfg"); err != nil {
			log.Fatalf("Failed to open %v: %v", haproxyConf, err)
			defer w.Close()
		} else if err := tmpl.Execute(w, ingresses); err != nil {
			log.Fatalf("Failed to write template %v", err)
		}
		restartHaproxy("haproxy_reload")
	}
}
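
The token bucket is what paces this controller loop: with qps=0.1 and burst=1, the first Accept() returns immediately and each later call blocks for roughly ten seconds. A standalone sketch of that behavior:

// Sketch: burst=1 admits one call at once; qps=0.1 then meters calls
// to about one every 10 seconds.
rl := util.NewTokenBucketRateLimiter(0.1, 1)
for i := 0; i < 3; i++ {
	start := time.Now()
	rl.Accept()
	log.Printf("iteration %d waited %v", i, time.Since(start))
}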
Example #4
File: router.go Project: jchauncey/router
func main() {
	nginx.Start()
	kubeClient, err := client.NewInCluster()
	if err != nil {
		log.Fatalf("Failed to create client: %v.", err)
	}
	rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1)
	known := &model.RouterConfig{}
	// Main loop
	for {
		rateLimiter.Accept()
		routerConfig, err := model.Build(kubeClient)
		if err != nil {
			log.Printf("Error building model; not modifying certs or configuration: %v.", err)
			continue
		}
		if reflect.DeepEqual(routerConfig, known) {
			continue
		}
		log.Println("INFO: Router configuration has changed in k8s.")
		err = nginx.WriteCerts(routerConfig, "/opt/nginx/ssl")
		if err != nil {
			log.Printf("Failed to write certs; continuing with existing certs and configuration: %v", err)
			continue
		}
		err = nginx.WriteConfig(routerConfig, "/opt/nginx/conf/nginx.conf")
		if err != nil {
			log.Printf("Failed to write new nginx configuration; continuing with existing configuration: %v", err)
			continue
		}
		nginx.Reload()
		known = routerConfig
	}
}
Example #5
// newIPVSController creates a new controller from the given config.
func newIPVSController(kubeClient *unversioned.Client, namespace string, useUnicast bool, password string) *ipvsControllerController {
	ipvsc := ipvsControllerController{
		client:            kubeClient,
		queue:             workqueue.New(),
		reloadRateLimiter: util.NewTokenBucketRateLimiter(reloadQPS, int(reloadQPS)),
		reloadLock:        &sync.Mutex{},
	}

	clusterNodes := getClusterNodesIP(kubeClient)

	nodeInfo, err := getNodeInfo(clusterNodes)
	if err != nil {
		glog.Fatalf("Error getting local IP from nodes in the cluster: %v", err)
	}

	neighbors := getNodeNeighbors(nodeInfo, clusterNodes)

	ipvsc.keepalived = &keepalived{
		iface:      nodeInfo.iface,
		ip:         nodeInfo.ip,
		netmask:    nodeInfo.netmask,
		nodes:      clusterNodes,
		neighbors:  neighbors,
		priority:   getNodePriority(nodeInfo.ip, clusterNodes),
		useUnicast: useUnicast,
		password:   password,
	}

	enqueue := func(obj interface{}) {
		key, err := keyFunc(obj)
		if err != nil {
			glog.Infof("Couldn't get key for object %+v: %v", obj, err)
			return
		}

		ipvsc.queue.Add(key)
	}

	eventHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc:    enqueue,
		DeleteFunc: enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				enqueue(cur)
			}
		},
	}

	ipvsc.svcLister.Store, ipvsc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			ipvsc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, eventHandlers)

	ipvsc.epLister.Store, ipvsc.epController = framework.NewInformer(
		cache.NewListWatchFromClient(
			ipvsc.client, "endpoints", namespace, fields.Everything()),
		&api.Endpoints{}, resyncPeriod, eventHandlers)

	return &ipvsc
}
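
This constructor only wires up the queue, informers, and limiter; the consumer of queue and reloadRateLimiter is not part of the excerpt. A hedged sketch of what such a worker loop typically looks like (sync is a hypothetical method name):

// Sketch only: drain keys from the work queue and rate-limit the expensive
// reload step; the project's actual worker may differ.
func (ipvsc *ipvsControllerController) worker() {
	for {
		key, quit := ipvsc.queue.Get()
		if quit {
			return
		}
		ipvsc.reloadRateLimiter.Accept() // cap reloads at reloadQPS
		ipvsc.sync(key.(string))         // hypothetical sync method
		ipvsc.queue.Done(key)
	}
}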
Example #6
func NewTurboScheduler(kubeClient *client.Client, vmturboMeta *vmtmeta.VMTMeta) *TurboScheduler {
	scheduledPodLister := &cache.StoreToPodLister{}
	podQueue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)

	modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{Store: podQueue}, scheduledPodLister)

	bindPodsQPS := float32(15.0)
	bindPodsBurst := 20
	rateLimiter := util.NewTokenBucketRateLimiter(bindPodsQPS, bindPodsBurst)

	config := &Config{
		Modeler:             modeler,
		Binder:              &binder{kubeClient},
		BindPodsRateLimiter: rateLimiter,
	}
	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "turboscheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	vmtSched := vmtscheduler.NewVMTScheduler(kubeClient, vmturboMeta)
	glog.V(3).Infof("VMTScheduler is set: %++v", vmtSched)

	defaultSched := defaultscheduler.NewDefaultScheduler(kubeClient)
	glog.V(3).Infof("DefaultScheduler is set: %++v", defaultSched)

	return &TurboScheduler{
		config:           config,
		vmtScheduler:     vmtSched,
		defaultScheduler: defaultSched,
	}
}
Example #7
func main() {
	var ingClient client.IngressInterface
	if kubeClient, err := client.NewInCluster(); err != nil {
		log.Fatalf("Failed to create client: %v.", err)
	} else {
		ingClient = kubeClient.Extensions().Ingress(api.NamespaceAll)
	}
	tmpl, _ := template.New("nginx").Parse(nginxConf)
	rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1)
	known := &extensions.IngressList{}

	// Controller loop
	shellOut("nginx")
	for {
		rateLimiter.Accept()
		ingresses, err := ingClient.List(labels.Everything(), fields.Everything())
		// Note: list errors and unchanged lists are both skipped silently here.
		if err != nil || reflect.DeepEqual(ingresses.Items, known.Items) {
			continue
		}
		known = ingresses
		if w, err := os.Create("/etc/nginx/nginx.conf"); err != nil {
			log.Fatalf("Failed to open %v: %v", nginxConf, err)
		} else if err := tmpl.Execute(w, ingresses); err != nil {
			log.Fatalf("Failed to write template %v", err)
		}
		shellOut("nginx -s reload")
	}
}
Example #8
File: factory.go Project: Tlacenka/origin
// CreateDeleteController constructs a BuildPodDeleteController
func (factory *BuildPodControllerFactory) CreateDeleteController() controller.RunnableController {

	client := ControllerClient{factory.KubeClient, factory.OSClient}
	queue := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, nil)
	cache.NewReflector(&buildPodDeleteLW{client, queue}, &kapi.Pod{}, queue, 5*time.Minute).Run()

	buildPodDeleteController := &buildcontroller.BuildPodDeleteController{
		BuildStore:   factory.buildStore,
		BuildUpdater: factory.BuildUpdater,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			controller.RetryNever,
			kutil.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			deltas := obj.(cache.Deltas)
			for _, delta := range deltas {
				if delta.Type == cache.Deleted {
					return buildPodDeleteController.HandleBuildPodDeletion(delta.Object.(*kapi.Pod))
				}
			}
			return nil
		},
	}
}
Example #9
// Create constructs a BuildPodController
func (factory *BuildPodControllerFactory) Create() controller.RunnableController {
	factory.buildStore = cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildLW{client: factory.OSClient}, &buildapi.Build{}, factory.buildStore, 2*time.Minute).RunUntil(factory.Stop)

	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&podLW{client: factory.KubeClient}, &kapi.Pod{}, queue, 2*time.Minute).RunUntil(factory.Stop)

	client := ControllerClient{factory.KubeClient, factory.OSClient}
	buildPodController := &buildcontroller.BuildPodController{
		BuildStore:   factory.buildStore,
		BuildUpdater: factory.BuildUpdater,
		PodManager:   client,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			retryFunc("BuildPod", nil),
			kutil.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			pod := obj.(*kapi.Pod)
			return buildPodController.HandlePod(pod)
		},
	}
}
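
The retryFunc helper is not included in this excerpt; a plausible reconstruction of its shape, inferred from the call sites here and in Example #10 (the retry cap is an assumption):

// Sketch: build a retry predicate for NewQueueRetryManager. isFatal may be
// nil; when provided, it marks errors that must never be retried.
func retryFunc(kind string, isFatal func(err error) bool) func(obj interface{}, err error, retries controller.Retry) bool {
	return func(obj interface{}, err error, retries controller.Retry) bool {
		if isFatal != nil && isFatal(err) {
			return false // fatal errors are dropped, not requeued
		}
		return retries.Count < 5 // assumed cap; the real limit lives in the project
	}
}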
Example #10
// Create creates a new ImageChangeController which is used to trigger builds when a new
// image is available
func (factory *ImageChangeControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&imageStreamLW{factory.Client}, &imageapi.ImageStream{}, queue, 2*time.Minute).RunUntil(factory.Stop)

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildConfigLW{client: factory.Client}, &buildapi.BuildConfig{}, store, 2*time.Minute).RunUntil(factory.Stop)

	imageChangeController := &buildcontroller.ImageChangeController{
		BuildConfigStore:        store,
		BuildConfigInstantiator: factory.BuildConfigInstantiator,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			retryFunc("ImageStream update", func(err error) bool {
				_, isFatal := err.(buildcontroller.ImageChangeControllerFatalError)
				return isFatal
			}),
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			imageRepo := obj.(*imageapi.ImageStream)
			return imageChangeController.HandleImageRepo(imageRepo)
		},
	}
}
Example #11
File: factory.go Project: Tlacenka/origin
// Create constructs a BuildController
func (factory *BuildControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildLW{client: factory.OSClient}, &buildapi.Build{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	client := ControllerClient{factory.KubeClient, factory.OSClient}
	buildController := &buildcontroller.BuildController{
		BuildUpdater:      factory.BuildUpdater,
		ImageStreamClient: client,
		PodManager:        client,
		BuildStrategy: &typeBasedFactoryStrategy{
			DockerBuildStrategy: factory.DockerBuildStrategy,
			SourceBuildStrategy: factory.SourceBuildStrategy,
			CustomBuildStrategy: factory.CustomBuildStrategy,
		},
		Recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "build-controller"}),
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			limitedLogAndRetry(factory.BuildUpdater, 30*time.Minute),
			kutil.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			build := obj.(*buildapi.Build)
			return buildController.HandleBuild(build)
		},
	}
}
Example #12
// NewRESTClient creates a new RESTClient. This client performs generic REST functions
// such as Get, Put, Post, and Delete on specified paths.  Codec controls encoding and
// decoding of responses from the server.
func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, client *http.Client) *RESTClient {
	base := *baseURL
	if !strings.HasSuffix(base.Path, "/") {
		base.Path += "/"
	}
	base.RawQuery = ""
	base.Fragment = ""

	if config.GroupVersion == nil {
		config.GroupVersion = &unversioned.GroupVersion{}
	}
	if len(config.ContentType) == 0 {
		config.ContentType = "application/json"
	}

	var throttle util.RateLimiter
	if maxQPS > 0 {
		throttle = util.NewTokenBucketRateLimiter(maxQPS, maxBurst)
	}
	return &RESTClient{
		base:             &base,
		versionedAPIPath: versionedAPIPath,
		contentConfig:    config,
		Throttle:         throttle,
		Client:           client,
	}
}
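
The Throttle stored here is consulted before each request is sent; a simplified sketch of the call site (doRequest is a hypothetical name, the real check sits in the client's request machinery):

// Sketch: apply the optional throttle before dispatching a request.
func (c *RESTClient) doRequest(req *http.Request) (*http.Response, error) {
	if c.Throttle != nil {
		c.Throttle.Accept() // block until the token bucket permits the call
	}
	return c.Client.Do(req)
}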
Example #13
// Create creates an ImportController.
func (f *ImportControllerFactory) Create() controller.RunnableController {
	lw := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	q := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(lw, &api.ImageStream{}, q, 2*time.Minute).Run()

	c := &ImportController{
		streams:  f.Client,
		mappings: f.Client,
	}

	return &controller.RetryController{
		Queue: q,
		RetryManager: controller.NewQueueRetryManager(
			q,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				util.HandleError(err)
				return retries.Count < 5
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			r := obj.(*api.ImageStream)
			return c.Next(r)
		},
	}
}
Example #14
func main() {
	var ingClient client.IngressInterface
	if kubeClient, err := client.NewInCluster(); err != nil {
		log.Fatalf("Failed to create client: %v.", err)
	} else {
		ingClient = kubeClient.Extensions().Ingress("devops-test")
	}
	tmpl, _ := template.New("nginx").Parse(nginxConf)
	rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1)
	known := &extensions.IngressList{}

	// Controller loop
	shellOut("nginx")
	for {
		rateLimiter.Accept()
		ingresses, err := ingClient.List(api.ListOptions{})
		if err != nil {
			log.Printf("Error retrieving ingresses: %v", err)
			continue
		}
		if reflect.DeepEqual(ingresses.Items, known.Items) {
			continue
		}
		known = ingresses
		if w, err := os.Create("/etc/nginx/nginx.conf"); err != nil {
			log.Fatalf("Failed to open %v: %v", nginxConf, err)
		} else if err := tmpl.Execute(w, ingresses); err != nil {
			log.Fatalf("Failed to write template %v", err)
		}

		restartNginx("nginx -t")

	}
}
Example #15
func main() {
	var ingClient client.IngressInterface
	var secretsClient client.SecretsInterface
	/* Anon http client
	config := client.Config{
		Host:     "http://localhost:8080",
		Username: "******",
		Password: "******",
	}
	kubeClient, err := client.New(&config)
	*/
	kubeClient, err := client.NewInCluster()
	if err != nil {
		log.Fatalf("Failed to create client: %v.", err)
	} else {
		ingClient = kubeClient.Extensions().Ingress(api.NamespaceAll)
		secretsClient = kubeClient.Secrets(api.NamespaceAll)
	}
	tmpl := template.New("nginx.tmpl").Funcs(template.FuncMap{"hasprefix": hasPrefix, "hassuffix": hasSuffix})
	if _, err := tmpl.ParseFiles("./nginx.tmpl"); err != nil {
		log.Fatalf("Failed to parse template %v", err)
	}

	rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1)
	known := &extensions.IngressList{}
	known_secrets := &api.SecretList{}

	// Controller loop
	shellOut("nginx")
	for {
		rateLimiter.Accept()
		ingresses, err := ingClient.List(api.ListOptions{})
		if err != nil {
			log.Printf("Error retrieving ingresses: %v", err)
			continue
		}
		secrets, err := secretsClient.List(api.ListOptions{})
		if err != nil {
			log.Printf("Error retrieving secrets: %v", err)
			continue
		}
		if reflect.DeepEqual(ingresses.Items, known.Items) && reflect.DeepEqual(secrets.Items, known_secrets.Items) {
			continue
		}
		// Process SSL context
		// old values
		known = ingresses
		known_secrets = secrets
		// context variable
		context := &Context{Ingress: ingresses, Secrets: secrets}
		if w, err := os.Create("/etc/nginx/nginx.conf"); err != nil {
			log.Fatalf("Failed to open %v: %v", err)
		} else if err := tmpl.Execute(w, context); err != nil {
			log.Fatalf("Failed to write template %v", err)
		}
		shellOut("nginx -s reload")
	}
}
Example #16
// newLoadBalancerController creates a new controller from the given config.
func newLoadBalancerController(cfg *loadBalancerConfig, kubeClient *unversioned.Client, namespace string) *loadBalancerController {

	lbc := loadBalancerController{
		cfg:    cfg,
		client: kubeClient,
		queue:  workqueue.New(),
		reloadRateLimiter: util.NewTokenBucketRateLimiter(
			reloadQPS, int(reloadQPS)),
		targetService:   *targetService,
		forwardServices: *forwardServices,
		httpPort:        *httpPort,
		tcpServices:     map[string]int{},
	}

	for _, service := range strings.Split(*tcpServices, ",") {
		portSplit := strings.Split(service, ":")
		if len(portSplit) != 2 {
			glog.Errorf("Ignoring misconfigured TCP service %v", service)
			continue
		}
		if port, err := strconv.Atoi(portSplit[1]); err != nil {
			glog.Errorf("Ignoring misconfigured TCP service %v: %v", service, err)
			continue
		} else {
			lbc.tcpServices[portSplit[0]] = port
		}
	}
	enqueue := func(obj interface{}) {
		key, err := keyFunc(obj)
		if err != nil {
			glog.Infof("Couldn't get key for object %+v: %v", obj, err)
			return
		}
		lbc.queue.Add(key)
	}
	eventHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc:    enqueue,
		DeleteFunc: enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				enqueue(cur)
			}
		},
	}

	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, eventHandlers)

	lbc.epLister.Store, lbc.epController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "endpoints", namespace, fields.Everything()),
		&api.Endpoints{}, resyncPeriod, eventHandlers)

	return &lbc
}
Example #17
func newAltTokenSource(tokenURL string) oauth2.TokenSource {
	client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource(""))
	a := &altTokenSource{
		oauthClient: client,
		tokenURL:    tokenURL,
		throttle:    util.NewTokenBucketRateLimiter(tokenURLQPS, tokenURLBurst),
	}
	return oauth2.ReuseTokenSource(nil, a)
}
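
The altTokenSource itself is defined elsewhere; a hedged sketch of how its Token method would apply the throttle before hitting tokenURL (the fetch and decode steps are assumptions; only the field names come from the constructor):

// Sketch: rate-limit calls to the alternate token endpoint.
func (a *altTokenSource) Token() (*oauth2.Token, error) {
	a.throttle.Accept() // cap token fetches at tokenURLQPS
	resp, err := a.oauthClient.Get(a.tokenURL) // assumed GET; the real code may differ
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var tok oauth2.Token
	if err := json.NewDecoder(resp.Body).Decode(&tok); err != nil {
		return nil, err
	}
	return &tok, nil
}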
Example #18
func CreateConfig(kubeClient *client.Client) *scheduler.Config {
	configFactory := factory.NewConfigFactory(kubeClient, util.NewTokenBucketRateLimiter(15.0, 20))
	config, err := createConfigFromDefaultProvider(configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	return config
}
Example #19
func main() {
	var ingClient client.IngressInterface
	if kubeClient, err := client.NewInCluster(); err != nil {
		log.Fatalf("Failed to create client: %v.", err)
	} else {
		ingClient = kubeClient.Extensions().Ingress(api.NamespaceAll)
	}
	tmpl, _ := template.New("nginx").Parse(nginxConf)
	tmplSSL, _ := template.New("nginx").Parse(nginxSSLConf)
	rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1)
	known := &extensions.IngressList{}

	// Controller loop
	//shellOut("nginx")
	for {
		rateLimiter.Accept()
		ingresses, err := ingClient.List(api.ListOptions{})
		if err != nil {
			log.Printf("Error retrieving ingresses: %v", err)
			continue
		}

		// Note: assumes at least one ingress with an HTTP rule exists.
		fmt.Printf("Port %+v\n", ingresses.Items[0].Spec.Rules[0].HTTP.Paths[0].Backend.ServicePort.IntValue())
		fmt.Printf("Path %+v\n", ingresses.Items[0].Spec.Rules[0].HTTP.Paths[0].Path)

		var httpIngressList extensions.IngressList
		var httpsIngressList extensions.IngressList

		for _, v := range ingresses.Items {
			fmt.Printf("%#v\n", v)
			if v.Spec.Rules[0].HTTP.Paths[0].Backend.ServicePort.IntVal == 80 {
				httpIngressList.Items = append(httpIngressList.Items, v)
			} else if v.Spec.Rules[0].HTTP.Paths[0].Backend.ServicePort.IntVal == 443 {
				httpsIngressList.Items = append(httpsIngressList.Items, v)
			}
		}
		if reflect.DeepEqual(ingresses.Items, known.Items) {
			continue
		}
		known = ingresses
		if w, err := os.Create("/etc/nginx/conf.d/appcloud.conf"); err != nil {
			log.Fatalf("Failed to open %v: %v", nginxConf, err)
		} else if err := tmpl.Execute(w, httpIngressList); err != nil {
			log.Fatalf("Failed to write template %v", err)
		}

		if w, err := os.Create("/etc/nginx/conf.d/appcloud-ssl.conf"); err != nil {
			log.Fatalf("Failed to open %v: %v", nginxSSLConf, err)
		} else if err := tmplSSL.Execute(w, httpsIngressList); err != nil {
			log.Fatalf("Failed to write template %v", err)
		}

		//	shellOut("nginx -s reload")
	}
}
Example #20
File: factory.go Project: ncantor/origin
// Create creates a DeploymentConfigChangeController.
func (factory *DeploymentConfigChangeControllerFactory) Create() controller.RunnableController {
	deploymentConfigLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	changeController := &DeploymentConfigChangeController{
		changeStrategy: &changeStrategyImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Get(name)
			},
			generateDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Generate(name)
			},
			updateDeploymentConfigFunc: func(namespace string, config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Update(config)
			},
		},
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, factory.Codec)
		},
		recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployer"}),
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				if _, isFatal := err.(fatalError); isFatal {
					return false
				}
				if retries.Count > 0 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			config := obj.(*deployapi.DeploymentConfig)
			return changeController.Handle(config)
		},
	}
}
Example #21
// Run runs the specified SchedulerServer.  This should never exit.
func (s *SchedulerServer) Run(_ []string) error {
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified.  Using default API client.  This might not work.")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}

	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = s.KubeAPIQPS
	kubeconfig.Burst = s.KubeAPIBurst

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	configFactory := factory.NewConfigFactory(kubeClient, util.NewTokenBucketRateLimiter(s.BindPodsQPS, s.BindPodsBurst))
	config, err := s.createConfig(configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	sched := scheduler.New(config)
	sched.Run()

	select {}
}
Example #22
// Create creates an ImportController.
func (f *ImportControllerFactory) Create() (controller.RunnableController, controller.StoppableController) {
	lw := &cache.ListWatch{
		ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).List(options)
		},
		WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).Watch(options)
		},
	}
	q := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(lw, &api.ImageStream{}, q, f.ResyncInterval).Run()

	// instantiate a scheduled importer using a number of buckets
	buckets := 4
	switch {
	case f.MinimumCheckInterval > time.Hour:
		buckets = 8
	case f.MinimumCheckInterval < 10*time.Minute:
		buckets = 2
	}
	seconds := f.MinimumCheckInterval / time.Second
	bucketQPS := 1.0 / float32(seconds) * float32(buckets)

	limiter := util.NewTokenBucketRateLimiter(bucketQPS, 1)
	b := newScheduled(f.ScheduleEnabled, f.Client, buckets, limiter, f.ImportRateLimiter)

	// instantiate an importer for changes that happen to the image stream
	changed := &controller.RetryController{
		Queue: q,
		RetryManager: controller.NewQueueRetryManager(
			q,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				utilruntime.HandleError(err)
				return retries.Count < 5
			},
			util.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: b.Handle,
	}

	return changed, b.scheduler
}
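
To make the bucket math concrete: with a MinimumCheckInterval of 15 minutes, seconds is 900 and buckets stays 4, so bucketQPS = (1.0 / 900) * 4 ≈ 0.0044 — roughly one token per bucket per check interval — while the changed-stream controller above keeps its own 1 QPS / burst 10 limiter.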
Example #23
// RunNodeController starts the node controller
func (c *MasterConfig) RunNodeController() {
	s := c.ControllerManager
	controller := nodecontroller.NewNodeController(
		c.CloudProvider,
		c.KubeClient,
		s.PodEvictionTimeout,

		util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst), // upstream uses the same ones too

		s.NodeMonitorGracePeriod,
		s.NodeStartupGracePeriod,
		s.NodeMonitorPeriod,

		(*net.IPNet)(&s.ClusterCIDR),
		s.AllocateNodeCIDRs,
	)

	controller.Run(s.NodeSyncPeriod)
}
Example #24
func (s *Services) Start() {
	rl := util.NewTokenBucketRateLimiter(0.1, 1)

	for {
		rl.Accept()

		// First we need to load the services.
		svcs, err := s.Client.Services("").List(labels.Everything())
		if err != nil {
			fmt.Printf("Error retrieving services: %v\n", err)
			continue
		}

		// We build a fresh list every time to ensure we don't have any issues with old data.
		newSvcs := make(map[string][]string)

		// Now we go over all the services and associate the pod IP addresses
		// to each of the services.
		for _, svc := range svcs.Items {
			var addrs []string

			ps, err := s.Client.Pods(svc.ObjectMeta.Namespace).List(labels.SelectorFromSet(labels.Set(svc.Spec.Selector)), fields.Everything())
			if err != nil {
				fmt.Printf("Error retrieving service: %v\n", err)
				continue
			}

			name := MergeNameNameSpace(svc.ObjectMeta.Namespace, svc.ObjectMeta.Name)

			// Add all the running pods to the list.
			for _, p := range ps.Items {
				if p.Status.Phase != api.PodRunning {
					fmt.Printf("Skipping pod %s for service %s\n", p.Name, name)
					continue
				}
				fmt.Printf("Added pod %s for service %s\n", p.Name, name)
				addrs = append(addrs, p.Status.PodIP+":80")
			}

			// Ensure we have some addresses, if we don't, we don't have to
			// worry about adding this service.
			if len(addrs) <= 0 {
				fmt.Printf("The service %s did not contain any upstream servers\n", name)
				continue
			}

			fmt.Printf("Added the service: %v\n", name)
			newSvcs[name] = addrs
		}

		// Now that we have built the list we can hand it over so be used for Get() requests.
		s.List = newSvcs
	}
}
Example #25
// NewRateLimitedFunction creates a new rate limited function.
func NewRateLimitedFunction(keyFunc kcache.KeyFunc, interval int, handlerFunc HandlerFunc) *RateLimitedFunction {
	fifo := kcache.NewFIFO(keyFunc)

	qps := float32(1000.0) // Call rate per second (SLA).
	if interval > 0 {
		qps = float32(1.0 / float32(interval))
	}

	limiter := kutil.NewTokenBucketRateLimiter(qps, 1)

	return &RateLimitedFunction{handlerFunc, fifo, limiter}
}
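
For example, interval = 10 yields qps = 1/10 = 0.1 with burst 1, so the wrapped handler fires at most once every ten seconds; a non-positive interval falls through to the effectively unthrottled 1000 QPS default.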
Example #26
File: github.go Project: wojtek-t/contrib
// PreExecute will initialize the GithubConfig. It MUST be run before the config
// may be used to get information from Github
func (config *GithubConfig) PreExecute() error {
	if len(config.Org) == 0 {
		glog.Fatalf("--organization is required.")
	}
	if len(config.Project) == 0 {
		glog.Fatalf("--project is required.")
	}

	token := config.Token
	if len(token) == 0 && len(config.TokenFile) != 0 {
		data, err := ioutil.ReadFile(config.TokenFile)
		if err != nil {
			glog.Fatalf("error reading token file: %v", err)
		}
		token = string(data)
	}

	transport := http.DefaultTransport
	if config.useMemoryCache {
		transport = httpcache.NewMemoryCacheTransport()
	}

	// convert from queries per hour to queries per second
	config.RateLimit = config.RateLimit / 3600
	// ignore the configured rate limit if you don't have a token.
	// only get 60 requests per hour!
	if len(token) == 0 {
		glog.Warningf("Ignoring --rate-limit because no token data available")
		config.RateLimit = 0.01
		config.RateLimitBurst = 10
	}
	rateLimitTransport := &rateLimitRoundTripper{
		delegate: transport,
		throttle: util.NewTokenBucketRateLimiter(config.RateLimit, config.RateLimitBurst),
	}

	client := &http.Client{
		Transport: rateLimitTransport,
	}
	if len(token) > 0 {
		ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
		client = &http.Client{
			Transport: &oauth2.Transport{
				Base:   rateLimitTransport,
				Source: oauth2.ReuseTokenSource(nil, ts),
			},
		}
	}
	config.client = github.NewClient(client)
	config.analytics.lastAPIReset = time.Now()
	return nil
}
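
Concretely, a configured limit of 1800 queries/hour becomes 1800 / 3600 = 0.5 QPS after this division — the same value hard-coded in the older variants of this function (Examples #1 and #2).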
Example #27
File: factory.go Project: rrati/origin
// Create creates a DeploymentConfigController.
func (factory *DeploymentConfigControllerFactory) Create() controller.RunnableController {
	deploymentConfigLW := &cache.ListWatch{
		ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(options)
		},
		WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(options)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))
	recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deploymentconfig-controller"})

	configController := NewDeploymentConfigController(factory.KubeClient, factory.Client, factory.Codec, recorder)

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				config := obj.(*deployapi.DeploymentConfig)
				// no retries for a fatal error
				if _, isFatal := err.(fatalError); isFatal {
					glog.V(4).Infof("Will not retry fatal error for deploymentConfig %s/%s: %v", config.Namespace, config.Name, err)
					kutil.HandleError(err)
					return false
				}
				// infinite retries for a transient error
				if _, isTransient := err.(transientError); isTransient {
					glog.V(4).Infof("Retrying deploymentConfig %s/%s with error: %v", config.Namespace, config.Name, err)
					return true
				}
				kutil.HandleError(err)
				// no retries for anything else
				if retries.Count > 0 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			config := obj.(*deployapi.DeploymentConfig)
			return configController.Handle(config)
		},
	}
}
Example #28
func TestQueueRetryManager_retries(t *testing.T) {
	retries := 5
	requeued := map[string]int{}

	manager := &QueueRetryManager{
		queue: &testFifo{
			// Track re-queues
			AddIfNotPresentFunc: func(obj interface{}) error {
				id := obj.(testObj).id
				if _, exists := requeued[id]; !exists {
					requeued[id] = 0
				}
				requeued[id] = requeued[id] + 1
				return nil
			},
		},
		keyFunc: func(obj interface{}) (string, error) {
			return obj.(testObj).id, nil
		},
		retryFunc: func(obj interface{}, err error, r Retry) bool {
			return r.Count < 5 && !r.StartTimestamp.IsZero()
		},
		retries: make(map[string]Retry),
		limiter: kutil.NewTokenBucketRateLimiter(1000, 1000),
	}

	objects := []testObj{
		{"a", 1},
		{"b", 2},
		{"c", 3},
	}

	// Retry one more than the max
	for _, obj := range objects {
		for i := 0; i < retries+1; i++ {
			manager.Retry(obj, nil)
		}
	}

	// Should only have re-queued up to the max retry setting
	for _, obj := range objects {
		if e, a := retries, requeued[obj.id]; e != a {
			t.Fatalf("expected requeue count %d for obj %s, got %d", e, obj.id, a)
		}
	}

	// Should have no more state since all objects were retried beyond max
	if e, a := 0, len(manager.retries); e != a {
		t.Fatalf("expected retry len %d, got %d", e, a)
	}
}
Example #29
// newIPVSController creates a new controller from the given config.
func newIPVSController(kubeClient *unversioned.Client, namespace string, useUnicast bool, configMapName string) *ipvsControllerController {
	ipvsc := ipvsControllerController{
		client:            kubeClient,
		reloadRateLimiter: util.NewTokenBucketRateLimiter(reloadQPS, int(reloadQPS)),
		ruCfg:             []vip{},
		configMapName:     configMapName,
	}

	clusterNodes := getClusterNodesIP(kubeClient)

	nodeInfo, err := getNodeInfo(clusterNodes)
	if err != nil {
		glog.Fatalf("Error getting local IP from nodes in the cluster: %v", err)
	}

	neighbors := getNodeNeighbors(nodeInfo, clusterNodes)

	execer := exec.New()
	dbus := utildbus.New()
	iptInterface := utiliptables.New(execer, dbus, utiliptables.ProtocolIpv4)

	ipvsc.keepalived = &keepalived{
		iface:      nodeInfo.iface,
		ip:         nodeInfo.ip,
		netmask:    nodeInfo.netmask,
		nodes:      clusterNodes,
		neighbors:  neighbors,
		priority:   getNodePriority(nodeInfo.ip, clusterNodes),
		useUnicast: useUnicast,
		ipt:        iptInterface,
	}

	err = ipvsc.keepalived.loadTemplate()
	if err != nil {
		glog.Fatalf("Error loading keepalived template: %v", err)
	}

	eventHandlers := framework.ResourceEventHandlerFuncs{}

	ipvsc.svcLister.Store, ipvsc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			ipvsc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, eventHandlers)

	ipvsc.epLister.Store, ipvsc.epController = framework.NewInformer(
		cache.NewListWatchFromClient(
			ipvsc.client, "endpoints", namespace, fields.Everything()),
		&api.Endpoints{}, resyncPeriod, eventHandlers)

	return &ipvsc
}
Example #30
// newDockerPuller creates a new instance of the default implementation of DockerPuller.
func newDockerPuller(client DockerInterface, qps float32, burst int) DockerPuller {
	dp := dockerPuller{
		client:  client,
		keyring: credentialprovider.NewDockerKeyring(),
	}

	if qps == 0.0 {
		return dp
	}
	return &throttledDockerPuller{
		puller:  dp,
		limiter: util.NewTokenBucketRateLimiter(qps, burst),
	}
}
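
The throttledDockerPuller wrapper is not shown; a sketch of how its Pull method would consult the limiter, assuming the non-blocking TryAccept() so an empty bucket fails fast instead of queueing pulls (the signature is simplified):

// Sketch: reject the pull rather than wait when the bucket is empty.
func (p *throttledDockerPuller) Pull(image string) error {
	if p.limiter.TryAccept() {
		return p.puller.Pull(image)
	}
	return fmt.Errorf("pull QPS exceeded")
}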