func main() {
	resGz := gz.GzipHandler(http.FileServer(http.Dir("res/")))
	setupGz := gz.GzipHandler(http.HandlerFunc(setup))
	postGz := gz.GzipHandler(http.HandlerFunc(post))
	guiGz := gz.GzipHandler(http.HandlerFunc(gui))

	http.Handle("/res/", resGz)
	http.Handle("/authority", setupGz)
	http.Handle("/authorize", setupGz)
	http.Handle("/post", postGz)
	http.Handle("/", guiGz)

	var pemfile = flag.String("pem", "", "Path to pem file")
	var keyfile = flag.String("key", "", "Path to key file")
	flag.Parse()

	if *pemfile == "" {
		ror := http.ListenAndServe(":8080", nil)
		er(ror)
	} else {
		config := &tls.Config{MinVersion: tls.VersionTLS10}
		server := &http.Server{Addr: ":443", Handler: nil, TLSConfig: config}
		ror := server.ListenAndServeTLS(*pemfile, *keyfile)
		er(ror)
	}
}

// internalInitialize will initialize the munger for the given GCS bucket url.
func (sq *SubmitQueue) internalInitialize(config *github.Config, features *features.Features, GCSBucketUrl string) error {
	sq.Lock()
	defer sq.Unlock()
	sq.githubConfig = config
	if len(sq.JenkinsHost) == 0 {
		glog.Fatalf("--jenkins-host is required.")
	}

	if sq.FakeE2E {
		sq.e2e = &fake_e2e.FakeE2ETester{
			JobNames:           sq.JobNames,
			WeakStableJobNames: sq.WeakStableJobNames,
		}
	} else {
		sq.e2e = &e2e.RealE2ETester{
			JobNames:             sq.JobNames,
			JenkinsHost:          sq.JenkinsHost,
			WeakStableJobNames:   sq.WeakStableJobNames,
			BuildStatus:          map[string]e2e.BuildInfo{},
			GoogleGCSBucketUtils: utils.NewUtils(GCSBucketUrl),
		}
	}

	if len(config.Address) > 0 {
		if len(config.WWWRoot) > 0 {
			http.Handle("/", gziphandler.GzipHandler(http.FileServer(http.Dir(config.WWWRoot))))
		}
		http.Handle("/prs", gziphandler.GzipHandler(http.HandlerFunc(sq.servePRs)))
		http.Handle("/history", gziphandler.GzipHandler(http.HandlerFunc(sq.serveHistory)))
		http.Handle("/users", gziphandler.GzipHandler(http.HandlerFunc(sq.serveUsers)))
		http.Handle("/github-e2e-queue", gziphandler.GzipHandler(http.HandlerFunc(sq.serveGithubE2EStatus)))
		http.Handle("/google-internal-ci", gziphandler.GzipHandler(http.HandlerFunc(sq.serveGoogleInternalStatus)))
		http.Handle("/merge-info", gziphandler.GzipHandler(http.HandlerFunc(sq.serveMergeInfo)))
		http.Handle("/priority-info", gziphandler.GzipHandler(http.HandlerFunc(sq.servePriorityInfo)))
		http.Handle("/health", gziphandler.GzipHandler(http.HandlerFunc(sq.serveHealth)))
		http.Handle("/sq-stats", gziphandler.GzipHandler(http.HandlerFunc(sq.serveSQStats)))
		config.ServeDebugStats("/stats")
		go http.ListenAndServe(config.Address, nil)
	}

	if sq.githubE2EPollTime == 0 {
		sq.githubE2EPollTime = githubE2EPollTime
	}

	sq.health.StartTime = sq.clock.Now()
	sq.health.NumStablePerJob = map[string]int{}

	go sq.handleGithubE2EAndMerge()
	go sq.updateGoogleE2ELoop()
	return nil
}

// mapRoutes() sets up handlers defined in the routes map above
func mapRoutes() {
	for route, handler := range routes {
		strippedHandler := http.StripPrefix(route, ChangeHeader(handler))
		finalHandler := gziphandler.GzipHandler(strippedHandler)
		http.Handle(route, finalHandler)
	}
}

func BuildHandler(utilsFunc utils.UtilsFunc, doGzip bool) http.Handler {
	uf = utilsFunc

	mux := http.NewServeMux()

	fs := http.FileServer(httpFs{http.Dir("public")})
	mux.Handle("/css/", fs)
	mux.Handle("/html/", fs)
	mux.Handle("/img/", fs)

	mux.HandleFunc("/", httpRedirect)
	mux.HandleFunc("/js/all.js", httpAllJs)
	mux.HandleFunc("/js/data.json", httpDataJson)
	mux.HandleFunc("/js/jquery.plugin.js", httpJqueryPluginJs)
	mux.HandleFunc("/v2/js/data.json", httpDataJson2)
	mux.HandleFunc("/v2/js/history.json", httpHistoryJson)
	mux.HandleFunc("/config", httpConfig)

	handler := cors.Default().Handler(mux)
	if !doGzip {
		return handler
	}
	return gziphandler.GzipHandler(handler)
}

func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	link = linkio.NewLink(linkio.Throughput(rate))

	gzipHandler := gziphandler.GzipHandler(http.HandlerFunc(handler))

	http.HandleFunc("/data/", rateLimitedHandler)
	http.Handle("/", gzipHandler)
	http.Handle("/data/manifest.json", gzipHandler)

	log.Fatal(http.ListenAndServe(":8000", nil))
}

// New constructs a new nbaapi.com server.
func New(addr string, db *db.DB) *Server {
	s := &Server{
		db: db,
	}
	s.Server = http.Server{
		Addr:    addr,
		Handler: gziphandler.GzipHandler(http.HandlerFunc(s.ServeHTTP)),
	}
	s.initRoutes()
	return s
}

// Run sets up and starts the http server.
// It requires a valid port to bind to, and the
// default provider to use for web searches.
func Run(port int, cert string, key string, provider string) error {
	err := validatePort(port)
	if err != nil {
		return err
	}

	indexHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/" {
			http.NotFound(w, r)
			return
		}
		index(provider, w, r)
	})
	searchHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		search(provider, w, r)
	})

	http.Handle("/", gziphandler.GzipHandler(indexHandler))
	http.Handle("/search", gziphandler.GzipHandler(searchHandler))
	setupFaviconHandlers()

	if cert != "" && key != "" {
		err = http.ListenAndServeTLS(fmt.Sprintf(":%d", port), cert, key, nil)
		if err != nil {
			return fmt.Errorf("HTTP Server: %s", err)
		}
	} else {
		err = http.ListenAndServe(fmt.Sprintf(":%d", port), nil)
		if err != nil {
			return fmt.Errorf("HTTP Server: %s", err)
		}
	}
	return nil
}

func main() {
	// Setup the global variables and settings
	err := models.Setup()
	if err != nil {
		fmt.Println(err)
	}
	wg := &sync.WaitGroup{}
	wg.Add(1)
	// Start the web servers
	go func() {
		defer wg.Done()
		adminHandler := gziphandler.GzipHandler(controllers.CreateAdminRouter())
		auth.Store.Options.Secure = config.Conf.AdminConf.UseTLS
		if config.Conf.AdminConf.UseTLS { // use TLS for Admin web server if available
			Logger.Printf("Starting admin server at https://%s\n", config.Conf.AdminConf.ListenURL)
			Logger.Fatal(http.ListenAndServeTLS(config.Conf.AdminConf.ListenURL, config.Conf.AdminConf.CertPath, config.Conf.AdminConf.KeyPath,
				handlers.CombinedLoggingHandler(os.Stdout, adminHandler)))
		} else {
			Logger.Printf("Starting admin server at http://%s\n", config.Conf.AdminConf.ListenURL)
			Logger.Fatal(http.ListenAndServe(config.Conf.AdminConf.ListenURL, handlers.CombinedLoggingHandler(os.Stdout, adminHandler)))
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		phishHandler := gziphandler.GzipHandler(controllers.CreatePhishingRouter())
		if config.Conf.PhishConf.UseTLS { // use TLS for Phish web server if available
			Logger.Printf("Starting phishing server at https://%s\n", config.Conf.PhishConf.ListenURL)
			Logger.Fatal(http.ListenAndServeTLS(config.Conf.PhishConf.ListenURL, config.Conf.PhishConf.CertPath, config.Conf.PhishConf.KeyPath,
				handlers.CombinedLoggingHandler(os.Stdout, phishHandler)))
		} else {
			Logger.Printf("Starting phishing server at http://%s\n", config.Conf.PhishConf.ListenURL)
			Logger.Fatal(http.ListenAndServe(config.Conf.PhishConf.ListenURL, handlers.CombinedLoggingHandler(os.Stdout, phishHandler)))
		}
	}()
	wg.Wait()
}

// Initialize will initialize the munger
func (sq *SubmitQueue) Initialize(config *github.Config, features *features.Features) error {
	sq.Lock()
	defer sq.Unlock()
	sq.githubConfig = config
	if len(sq.JenkinsHost) == 0 {
		glog.Fatalf("--jenkins-host is required.")
	}

	sq.lastE2EStable = true
	e2e := &e2e.E2ETester{
		JobNames:           sq.JobNames,
		JenkinsHost:        sq.JenkinsHost,
		WeakStableJobNames: sq.WeakStableJobNames,
		BuildStatus:        map[string]e2e.BuildInfo{},
	}
	sq.e2e = e2e

	if len(config.Address) > 0 {
		if len(config.WWWRoot) > 0 {
			http.Handle("/", gziphandler.GzipHandler(http.FileServer(http.Dir(config.WWWRoot))))
		}
		http.Handle("/prs", gziphandler.GzipHandler(http.HandlerFunc(sq.servePRs)))
		http.Handle("/history", gziphandler.GzipHandler(http.HandlerFunc(sq.serveHistory)))
		http.Handle("/users", gziphandler.GzipHandler(http.HandlerFunc(sq.serveUsers)))
		http.Handle("/github-e2e-queue", gziphandler.GzipHandler(http.HandlerFunc(sq.serveGithubE2EStatus)))
		http.Handle("/google-internal-ci", gziphandler.GzipHandler(http.HandlerFunc(sq.serveGoogleInternalStatus)))
		http.Handle("/merge-info", gziphandler.GzipHandler(http.HandlerFunc(sq.serveMergeInfo)))
		http.Handle("/priority-info", gziphandler.GzipHandler(http.HandlerFunc(sq.servePriorityInfo)))
		config.ServeDebugStats("/stats")
		go http.ListenAndServe(config.Address, nil)
	}

	sq.prStatus = map[string]submitStatus{}
	sq.lastPRStatus = map[string]submitStatus{}
	sq.githubE2EQueue = map[int]*github.MungeObject{}

	if sq.githubE2EPollTime == 0 {
		sq.githubE2EPollTime = githubE2EPollTime
	}

	go sq.handleGithubE2EAndMerge()
	go sq.updateGoogleE2ELoop()
	return nil
}

func main() {
	bind := flag.String("server.bind", ":1313", "Web server bind address")
	ver := flag.Bool("v", false, "Print version and exit")
	faktAPIHost := flag.String("api.fakt.host", "http://localhost:8080/api/v1", "Proxy api to avoid CORS crap")
	flag.Parse()

	if *ver {
		fmt.Printf("%s", VERSION)
		os.Exit(0)
	}

	//static assets
	var staticFileServer http.Handler
	if os.Getenv("DEV") == "true" {
		log.Print("DEV is enabled")
		staticFileServer = http.FileServer(http.Dir("ui/static"))
	} else {
		//in production use embedded files
		staticFileServer = http.FileServer(FS(false))
	}

	//routing
	mux := http.NewServeMux()
	mux.Handle("/ui/", http.RedirectHandler("/", http.StatusMovedPermanently))
	mux.Handle("/", http.StripPrefix("/", gziphandler.GzipHandler(staticFileServer)))

	//API proxy
	apiHostParsed, err := url.Parse(*faktAPIHost)
	if err != nil {
		log.Fatalf("Invalid URL for api.host: %s", *faktAPIHost)
	}
	log.Printf("Proxying API calls to: %s", apiHostParsed)
	mux.Handle(
		"/api/v1/",
		http.StripPrefix("/api/v1/", httputil.NewSingleHostReverseProxy(apiHostParsed)),
	)

	for true {
		log.Printf("Listening on %s", *bind)
		err := http.ListenAndServe(*bind, mux)
		log.Printf("SERVER FAILED: %s", err.Error())
		time.Sleep(1 * time.Second) //retry in 1 second
	}
}

func InitWeb() {
	l4g.Debug(utils.T("web.init.debug"))

	mainrouter := api.Srv.Router

	if *utils.Cfg.ServiceSettings.WebserverMode != "disabled" {
		staticDir := utils.FindDir(CLIENT_DIR)
		l4g.Debug("Using client directory at %v", staticDir)
		if *utils.Cfg.ServiceSettings.WebserverMode == "gzip" {
			mainrouter.PathPrefix("/static/").Handler(gziphandler.GzipHandler(staticHandler(http.StripPrefix("/static/", http.FileServer(http.Dir(staticDir))))))
		} else {
			mainrouter.PathPrefix("/static/").Handler(staticHandler(http.StripPrefix("/static/", http.FileServer(http.Dir(staticDir)))))
		}

		mainrouter.Handle("/{anything:.*}", api.AppHandlerIndependent(root)).Methods("GET")
	}
}

func main() {
	router := httprouter.New()
	router.GET("/api/joblist", getjoblist)
	router.GET("/api/log/:jobid", getlog)
	router.GET("/", func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
		http.ServeFile(w, r, "static/index.html")
	})

	s := &http.Server{
		Addr:           ":8181",
		Handler:        gziphandler.GzipHandler(router),
		ReadTimeout:    10 * time.Second,
		WriteTimeout:   10 * time.Second,
		MaxHeaderBytes: 1 << 20,
	}
	log.Println("Starting server on :8181")
	log.Fatal(s.ListenAndServe())
}

// NewHTTPServer starts new HTTP server over the agent
func NewHTTPServer(agent *Agent, config *Config, logOutput io.Writer) (*HTTPServer, error) {
	// Start the listener
	lnAddr, err := net.ResolveTCPAddr("tcp", config.normalizedAddrs.HTTP)
	if err != nil {
		return nil, err
	}
	ln, err := config.Listener("tcp", lnAddr.IP.String(), lnAddr.Port)
	if err != nil {
		return nil, fmt.Errorf("failed to start HTTP listener: %v", err)
	}

	// If TLS is enabled, wrap the listener with a TLS listener
	if config.TLSConfig.EnableHTTP {
		tlsConf := &tlsutil.Config{
			VerifyIncoming:       false,
			VerifyOutgoing:       true,
			VerifyServerHostname: config.TLSConfig.VerifyServerHostname,
			CAFile:               config.TLSConfig.CAFile,
			CertFile:             config.TLSConfig.CertFile,
			KeyFile:              config.TLSConfig.KeyFile,
		}
		tlsConfig, err := tlsConf.IncomingTLSConfig()
		if err != nil {
			return nil, err
		}
		ln = tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, tlsConfig)
	}

	// Create the mux
	mux := http.NewServeMux()

	// Create the server
	srv := &HTTPServer{
		agent:    agent,
		mux:      mux,
		listener: ln,
		logger:   agent.logger,
		addr:     ln.Addr().String(),
	}
	srv.registerHandlers(config.EnableDebug)

	// Start the server
	go http.Serve(ln, gziphandler.GzipHandler(mux))
	return srv, nil
}

// newScadaHttp creates a new HTTP server wrapping the SCADA
// listener such that HTTP calls can be sent from the brokers.
func newScadaHttp(agent *Agent, list net.Listener) *HTTPServer {
	// Create the mux
	mux := http.NewServeMux()

	// Create the server
	srv := &HTTPServer{
		agent:    agent,
		mux:      mux,
		listener: list,
		logger:   agent.logger,
		addr:     scadaHTTPAddr,
	}
	srv.registerHandlers(false) // Never allow debug for SCADA

	// Start the server
	go http.Serve(list, gziphandler.GzipHandler(mux))
	return srv
}

func main() {
	var port = flag.String("port", "8080", "Define what TCP port to bind to")
	var root = flag.String("root", ".", "Define the root filesystem path")
	var remnant = flag.String("remnant", "http://localhost:7777/", "")
	flag.Parse()

	remnantUrl = *remnant

	// create our own router so that we can add our own headers to static file responses
	router := httprouter.New()
	router.HandleMethodNotAllowed = false
	router.GET("/proxy/*proxypath", ProxyHandler)
	router.NotFound = &staticFileServer{
		gziphandler.GzipHandler(http.FileServer(http.Dir(*root))),
	}

	fmt.Printf("Proxying webserver: serving directory '%s' on port :%s\n", *root, *port)
	panic(http.ListenAndServe(":"+*port, router))
}

func main() {
	logger := lager.NewLogger("cnsim-server")
	logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))

	port := getEnv(logger, "PORT", "9000")
	listenAddress := getEnv(logger, "LISTEN_ADDRESS", "127.0.0.1")
	address := fmt.Sprintf("%s:%s", listenAddress, port)
	logger.Info("listen", lager.Data{"address": address})

	routes := rata.Routes{
		{Name: "root", Method: "GET", Path: "/"},
		{Name: "steady_state", Method: "GET", Path: "/steady_state"},
	}

	rataHandlers := rata.Handlers{
		"root": &handlers.Root{
			Logger: logger,
		},
		"steady_state": gziphandler.GzipHandler(&handlers.SteadyState{
			Logger: logger,
			Simulator: &simulate.SteadyState{
				AppSizeDistribution: &distributions.GeometricWithPositiveSupport{},
			},
		}),
	}

	router, err := rata.NewRouter(routes, rataHandlers)
	if err != nil {
		log.Fatalf("unable to create rata Router: %s", err) // not tested
	}

	monitor := ifrit.Invoke(sigmon.New(grouper.NewOrdered(os.Interrupt, grouper.Members{
		{"http_server", http_server.New(address, router)},
	})))
	err = <-monitor.Wait()
	if err != nil {
		log.Fatalf("ifrit: %s", err)
	}
}

func main() {
	memcacheFlag := flag.String("memcache", "localhost:11211", "memcached host and port")
	serverFlag := flag.String("server", "", "the proxed server")
	portFlag := flag.Int("port", 8081, "port on which to listen")
	expireDurationFlag := flag.Int("expire", 600, "expire duration in seconds")
	numExpiresToDecayFlag := flag.Int("decay", 5, "number of expires for one decay")
	durationThresholdFlag := flag.Int("refresh-duration", 200, "minimum duration in ms to reload")

	flag.Parse()

	if *serverFlag == "" {
		fmt.Fprintf(os.Stderr, "Error: -server option not given.\n")
		os.Exit(1)
	}

	theMemcache = memcache.New(*memcacheFlag)
	cache := memcacheCache{c: theMemcache, server: *serverFlag}
	err := cache.c.DeleteAll()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Couldn't flush memcache: %s\n", err.Error())
	}

	theKeep = keep.NewKeep(cache,
		time.Duration(*expireDurationFlag)*time.Second,
		*numExpiresToDecayFlag,
		time.Duration(*durationThresholdFlag)*time.Millisecond)
	go theKeep.Run()

	http.Handle("/", gziphandler.GzipHandler(http.HandlerFunc(cacheHandler)))
	http.HandleFunc("/admin/keep", keepHandler)

	err = http.ListenAndServe(fmt.Sprintf(":%d", *portFlag), nil)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: Listen failed: %s\n", err.Error())
		os.Exit(1)
	}
}

func main() {
	configurations := utils.ReadConfig()
	db.Configure(configurations.Database)
	api.Configure(configurations.Api)
	db.Mongo.Connect()

	//register middleware
	handle := gziphandler.GzipHandler(route.Routes())

	log.Println("Starting Server...")
	log.Println(http.ListenAndServe(":"+strconv.Itoa(configurations.Port), handle))

	/* enable for TLS support
	go func() {
		log.Println(http.ListenAndServe(":"+strconv.Itoa(configurations.Port), handle))
	}()

	if configurations.TLSCertFile != "" && configurations.TLSKeyFile != "" {
		log.Println(http.ListenAndServeTLS(":"+strconv.Itoa(configurations.TLSPort), configurations.TLSCertFile, configurations.TLSKeyFile, handle))
	}
	*/
}

// NewHTTPServer starts new HTTP server over the agent
func NewHTTPServer(agent *Agent, config *Config, logOutput io.Writer) (*HTTPServer, error) {
	// Start the listener
	ln, err := config.Listener("tcp", config.Addresses.HTTP, config.Ports.HTTP)
	if err != nil {
		return nil, fmt.Errorf("failed to start HTTP listener: %v", err)
	}

	// Create the mux
	mux := http.NewServeMux()

	// Create the server
	srv := &HTTPServer{
		agent:    agent,
		mux:      mux,
		listener: ln,
		logger:   log.New(logOutput, "", log.LstdFlags),
		addr:     ln.Addr().String(),
	}
	srv.registerHandlers(config.EnableDebug)

	// Start the server
	go http.Serve(ln, gziphandler.GzipHandler(mux))
	return srv, nil
}

// ServeHTTP is a function used by negroni
func (c *Compress) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
	newGzipHandler := gziphandler.GzipHandler(next)
	newGzipHandler.ServeHTTP(rw, r)
}

func (s *sequins) handler() http.Handler {
	return gziphandler.GzipHandler(s)
}

func registerGzipHandler(path string, f http.HandlerFunc) {
	http.Handle(path, gziphandler.GzipHandler(newExpvarHandler(path, f)))
}

// Middleware provides an http.Handler hook wrapped around all requests.
// In this implementation, we're using a GzipHandler middleware to
// compress our responses.
func (s *JSONPubService) Middleware(h http.Handler) http.Handler {
	return gziphandler.GzipHandler(h)
}

func init() {
	log.SetFlags(0)

	router = httprouter.New()

	register("/", PageTemplate{FileTemplate("index.gohtml"), ""})
	register("/guides/machine_setup", guideTemplate{FileTemplate("guides/01_machine_setup.gohtml"), "Machine Setup"})
	register("/guides/bootcamp", guideTemplate{FileTemplate("guides/02_bootcamp.gohtml"), "Bootcamp"})
	for w := 1; w <= 4; w++ {
		for d := 1; d <= 5; d++ {
			register(
				fmt.Sprintf("/guides/bootcamp/week-%d/day-%d", w, d),
				guideTemplate{
					FileTemplate(fmt.Sprintf("guides/bootcamp/week-%d/day-%d.gohtml", w, d)),
					"Bootcamp",
				},
			)
		}
	}

	register("/books/intro", introTemplate{Template: FileTemplate("books/intro/front.gohtml")})
	sections := []string{
		"Getting Started",
		"Your First Program",
		"Types",
		"Variables",
		"Control Structures",
		"Arrays, Slices and Maps",
		"Functions",
		"Pointers",
		"Structs and Interfaces",
		"Concurrency",
		"Packages",
		"Testing",
		"The Core Packages",
		"Next Steps",
	}
	for i, section := range sections {
		register(
			fmt.Sprint("/books/intro/", i+1),
			introTemplate{
				Template: FileTemplate(fmt.Sprint("books/intro/", i+1, ".gohtml")),
				Title:    section,
			},
		)
	}
	for i := 1; i <= 14; i++ {
		for _, str := range []string{
			fmt.Sprintf("/%d", i),
			fmt.Sprintf("/%d/index.htm", i),
		} {
			dst := fmt.Sprintf("/books/intro/%d", i)
			router.GET(str, func(res http.ResponseWriter, req *http.Request, params httprouter.Params) {
				http.Redirect(res, req, dst, http.StatusMovedPermanently)
			})
		}
	}

	register("/books/web", webTemplate{Template: FileTemplate("books/web/front.gohtml")})
	register("/books/web/00-01", webTemplate{Template: FileTemplate("books/web/00-01.gohtml")})
	register("/books/web/01-01", webTemplate{Template: FileTemplate("books/web/01-01.gohtml")})
	register("/books/web/01-02", webTemplate{Template: FileTemplate("books/web/01-02.gohtml")})
	register("/books/web/01-03", webTemplate{Template: FileTemplate("books/web/01-03.gohtml")})
	register("/books/web/01-04", webTemplate{Template: FileTemplate("books/web/01-04.gohtml")})
	for i := 1; i < 10; i++ {
		register(
			fmt.Sprintf("/books/web/02-%02d", i),
			webTemplate{Template: FileTemplate(fmt.Sprintf("books/web/02-%02d.gohtml", i))},
		)
	}

	public := http.FileServer(http.Dir("public"))
	router.GET("/public/*path", func(res http.ResponseWriter, req *http.Request, params httprouter.Params) {
		path := params.ByName("path")
		maxAge := "3600"
		parts := strings.SplitN(path, ".", 3)
		if len(parts) == 3 {
			p := parts[0] + "." + parts[2]
			if parts[1] == getVersion("public/"+p) {
				path = p
				maxAge = "31556926"
			}
		}
		req.URL.Path = "/" + path
		res.Header().Set("Cache-Control", "max-age="+maxAge+", public")
		public.ServeHTTP(res, req)
	})

	router.GET("/health", func(res http.ResponseWriter, req *http.Request, params httprouter.Params) {
		io.WriteString(res, "OK")
	})

	handler := gziphandler.GzipHandler(router)
	http.Handle("/", handler)
}

// Middleware provides a hook to add service-wide http.Handler middleware to the service.
// In this example we are using it to add GZIP compression to our responses.
// This method helps satisfy the server.Service interface.
func (s *SavedItemsService) Middleware(h http.Handler) http.Handler {
	// wrap the response with our GZIP Middleware
	return gziphandler.GzipHandler(h)
}

// Middleware provides an http.Handler hook wrapped around all requests.
// In this implementation, we're using a GzipHandler middleware to
// compress our responses.
func (s *SimpleService) Middleware(h http.Handler) http.Handler {
	return gziphandler.GzipHandler(h)
}

package middleware

import (
	"github.com/NYTimes/gziphandler"
	"github.com/jonasi/mohttp"
	"golang.org/x/net/context"
	"net/http"
	"sync"
)

var GzipHandler mohttp.Handler = mohttp.HandlerFunc(func(c context.Context) {
	h := gziphandler.GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w2 := &addContentTypeWriter{ResponseWriter: w}
		c2 := mohttp.WithResponseWriter(c, w2)
		mohttp.Next(c2)
	}))

	h.ServeHTTP(mohttp.GetResponseWriter(c), mohttp.GetRequest(c))
})

type addContentTypeWriter struct {
	o sync.Once
	http.ResponseWriter
}

func (w *addContentTypeWriter) Write(b []byte) (int, error) {
	w.o.Do(func() {
		h := w.ResponseWriter.Header()
		if h.Get("Content-Type") == "" {
			// The source snippet is truncated here; the likely intent,
			// given the type name, is to sniff a Content-Type from the
			// first chunk written (assumed completion):
			h.Set("Content-Type", http.DetectContentType(b))
		}
	})
	return w.ResponseWriter.Write(b)
}

func middleware(f func(http.ResponseWriter, *http.Request)) http.Handler {
	return gziphandler.GzipHandler(
		http.HandlerFunc(f),
	)
}

// internalInitialize will initialize the munger.
// if overrideUrl is specified, will create testUtils
func (sq *SubmitQueue) internalInitialize(config *github.Config, features *features.Features, overrideUrl string) error {
	sq.Lock()
	defer sq.Unlock()

	// Clean up all of our flags which we wish --flag="" to mean []string{}
	sq.BlockingJobNames = cleanStringSlice(sq.BlockingJobNames)
	sq.NonBlockingJobNames = cleanStringSlice(sq.NonBlockingJobNames)
	sq.PresubmitJobNames = cleanStringSlice(sq.PresubmitJobNames)
	sq.WeakStableJobNames = cleanStringSlice(sq.WeakStableJobNames)
	sq.RequiredStatusContexts = cleanStringSlice(sq.RequiredStatusContexts)
	sq.RequiredRetestContexts = cleanStringSlice(sq.RequiredRetestContexts)
	sq.DoNotMergeMilestones = cleanStringSlice(sq.DoNotMergeMilestones)

	sq.Metadata.RepoPullUrl = fmt.Sprintf("https://github.com/%s/%s/pulls/", config.Org, config.Project)
	sq.Metadata.ProjectName = strings.Title(config.Project)
	sq.githubConfig = config

	// TODO: This is not how injection for tests should work.
	if sq.FakeE2E {
		sq.e2e = &fake_e2e.FakeE2ETester{
			JobNames:           sq.BlockingJobNames,
			WeakStableJobNames: sq.WeakStableJobNames,
		}
	} else {
		var gcs *utils.Utils
		if overrideUrl != "" {
			gcs = utils.NewTestUtils("bucket", "logs", overrideUrl)
		} else {
			gcs = utils.NewWithPresubmitDetection(
				sq.features.GCSInfo.BucketName, sq.features.GCSInfo.LogDir,
				sq.features.GCSInfo.PullKey, sq.features.GCSInfo.PullLogDir,
			)
		}

		sq.e2e = (&e2e.RealE2ETester{
			BlockingJobNames:     sq.BlockingJobNames,
			NonBlockingJobNames:  sq.NonBlockingJobNames,
			WeakStableJobNames:   sq.WeakStableJobNames,
			BuildStatus:          map[string]e2e.BuildInfo{},
			GoogleGCSBucketUtils: gcs,
		}).Init(admin.Mux)
	}

	sq.lgtmTimeCache = mungerutil.NewLabelTimeCache(lgtmLabel)

	if len(config.Address) > 0 {
		if len(config.WWWRoot) > 0 {
			http.Handle("/", gziphandler.GzipHandler(http.FileServer(http.Dir(config.WWWRoot))))
		}
		http.Handle("/prs", gziphandler.GzipHandler(http.HandlerFunc(sq.servePRs)))
		http.Handle("/history", gziphandler.GzipHandler(http.HandlerFunc(sq.serveHistory)))
		http.Handle("/github-e2e-queue", gziphandler.GzipHandler(http.HandlerFunc(sq.serveGithubE2EStatus)))
		http.Handle("/google-internal-ci", gziphandler.GzipHandler(http.HandlerFunc(sq.serveGoogleInternalStatus)))
		http.Handle("/merge-info", gziphandler.GzipHandler(http.HandlerFunc(sq.serveMergeInfo)))
		http.Handle("/priority-info", gziphandler.GzipHandler(http.HandlerFunc(sq.servePriorityInfo)))
		http.Handle("/health", gziphandler.GzipHandler(http.HandlerFunc(sq.serveHealth)))
		http.Handle("/health.svg", gziphandler.GzipHandler(http.HandlerFunc(sq.serveHealthSVG)))
		http.Handle("/sq-stats", gziphandler.GzipHandler(http.HandlerFunc(sq.serveSQStats)))
		http.Handle("/flakes", gziphandler.GzipHandler(http.HandlerFunc(sq.serveFlakes)))
		http.Handle("/metadata", gziphandler.GzipHandler(http.HandlerFunc(sq.serveMetadata)))
		config.ServeDebugStats("/stats")
		go http.ListenAndServe(config.Address, nil)
	}

	admin.Mux.HandleFunc("/api/emergency/stop", sq.EmergencyStopHTTP)
	admin.Mux.HandleFunc("/api/emergency/resume", sq.EmergencyStopHTTP)
	admin.Mux.HandleFunc("/api/emergency/status", sq.EmergencyStopHTTP)

	if sq.githubE2EPollTime == 0 {
		sq.githubE2EPollTime = githubE2EPollTime
	}

	sq.healthHistory = make([]healthRecord, 0)

	go sq.handleGithubE2EAndMerge()
	go sq.updateGoogleE2ELoop()

	if sq.AdminPort != 0 {
		go http.ListenAndServe(fmt.Sprintf("0.0.0.0:%v", sq.AdminPort), admin.Mux)
	}
	return nil
}

func main() {
	var conf Conf
	err := envdecode.Decode(&conf)
	if err != nil {
		log.Fatal(err)
	}

	consumer, err := sarama.NewConsumer(strings.Split(conf.SeedBroker, ","), nil)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := consumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	listEvents := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		limit := DefaultLimit
		if r.Form.Get("limit") != "" {
			var err error
			limit, err = strconv.Atoi(r.Form.Get("limit"))
			if err != nil {
				log.Fatalln(err)
			}
		}

		// The one problem with this system is that the client always gets a
		// duplicate event when requesting the next page. We should probably
		// skip the initial event for convenience.
		offset := sarama.OffsetOldest
		if r.URL.Query().Get("sequence") != "" {
			var err error
			offset, err = strconv.ParseInt(r.URL.Query().Get("sequence"), 10, 64)
			if err != nil {
				log.Fatalln(err)
			}
		}

		log.Printf("Handling request limit %v offset %v", limit, offset)

		partitionConsumer, err := consumer.ConsumePartition(conf.KafkaTopic, 0, offset)
		if err != nil {
			panic(err)
		}
		defer func() {
			if err := partitionConsumer.Close(); err != nil {
				log.Fatalln(err)
			}
		}()

		var events []*map[string]interface{}
		firstLoop := true
		hasMore := true

	ConsumerLoop:
		for {
			select {
			case message := <-partitionConsumer.Messages():
				// skip the first message due to overlap
				if offset != sarama.OffsetOldest && firstLoop {
					firstLoop = false
					break
				}

				var event map[string]interface{}
				err := json.Unmarshal(message.Value, &event)
				if err != nil {
					log.Fatalln(err)
				}

				// Fill the event's new `sequence` field (the public name for
				// "offset" in order to disambiguate from Stripe's old
				// offset-style pagination parameter).
				event["sequence"] = message.Offset

				events = append(events, &event)
				//log.Printf("Consumed message. Now have %v event(s).", len(events))

				// We've fulfilled the requested limit. We're done!
				if len(events) >= limit {
					break ConsumerLoop
				}

			// Unfortunately sarama doesn't currently give us a good way of
			// detecting the end of a topic, so detect the end by timing out
			// for now.
			//
			// Note that this could result in a degenerate request which is
			// very long as new messages continue to trickle in until we hit
			// max page size at a rate that's never quite enough to hit our
			// timeout.
			case <-time.After(time.Second * time.Duration(ConsumeTimeout)):
				log.Printf("Timeout. Probably at end of topic.\n")
				hasMore = false
				break ConsumerLoop
			}
		}

		page := &Page{
			Data:    events,
			HasMore: hasMore,
			Object:  "list",
			URL:     "/v1/events",
		}

		data, err := json.Marshal(page)
		if err != nil {
			log.Fatalln(err)
		}

		w.Write(data)
		log.Printf("Responded to client with %v event(s)\n", len(events))
	})
	listEventsGz := gziphandler.GzipHandler(listEvents)
	http.Handle("/v1/events", listEventsGz)

	log.Printf("Starting HTTP server")
	log.Fatal(http.ListenAndServe(":8080", nil))
}