Ejemplo n.º 1
0
// init wires up the service at package load time: an in-memory database,
// an SMTP emailer for the periodic report, and the standard plus
// profiling endpoints on the default ServeMux. On a configuration error
// it records the error in global_error and registers nothing further.
func init() {
	db := NewMemoryDatabase()

	// Pull configuration from global variables; on failure, stash the
	// error for the caller to inspect and bail out early.
	cfg, err := NewConfigFromGlobals()
	if err != nil {
		fmt.Printf("Error parsing configuration from global variables: %s", err)
		global_error = err
		return
	}

	// The emailer is responsible for keeping track of how often to email
	// the report, how to send it, and whom to send it to.
	mailer := NewSMTPEmailer(
		cfg.EmailSMTPLogin,
		cfg.EmailSMTPPassword,
		cfg.EmailSMTPServer,
		cfg.EMAILSMTPPort,
		cfg.EmailSender,
		cfg.EmailRecipients,
		1*time.Minute)

	// One lock shared by the handlers; all locking happens at the
	// handler level.
	var mu sync.RWMutex

	// Standard endpoints.
	http.HandleFunc("/", indexHtmlHandler)
	http.HandleFunc("/index.html", indexHtmlHandler)
	http.HandleFunc("/info", buildSumpInfoHandler(db, 2*time.Hour, &mu))
	http.HandleFunc("/water-level", buildSumpRegisterLevelsHandler(db, PanicWaterLevel, mailer, cfg.ServerSecret, &mu))

	// Profiler endpoints.
	profiler.AddMemoryProfilingHandlers()
}
Ejemplo n.º 2
0
// main exposes the memory-profiling endpoints on the default ServeMux,
// starts profiling immediately, and serves on :6060 until the server
// fails. ListenAndServe only returns on error, so that error is fatal.
func main() {
	// Add handlers to help us track memory usage - they don't track
	// memory until they're told to.
	profiler.AddMemoryProfilingHandlers()

	// Start profiling automatically; remove this call to only profile
	// on demand via the handlers above.
	profiler.StartProfiling()

	// Listen on port 6060 (pick a port). Surface the failure reason
	// instead of silently discarding the error and exiting.
	log.Fatal(http.ListenAndServe(":6060", nil))
}
Ejemplo n.º 3
0
func main() {
	// add our "Hello, World!" endpoint to the default ServeMux
	http.HandleFunc("/", helloHandler)

	// add the profiler endpoints to the default ServeMux
	profiler.AddMemoryProfilingHandlers()

	// start the service
	log.Println("Starting service on :8080")
	log.Fatal(http.ListenAndServe(":8080", nil))
}
Ejemplo n.º 4
0
// main wires together the cookbook universe cache, its github and
// supermarket sources, the github webhook listener, and the HTTP API,
// then serves (optionally over TLS) until the server exits.
func main() {
	u := universe.NewCache(universe.Config{
		BerksOnly: config.Opts.BerksOnly,
	})

	// Add handlers to help us track memory usage - they don't track
	// memory until they're told to.
	profiler.AddMemoryProfilingHandlers()
	profiler.RegisterExtraServiceInfoRetriever(u.ServiceInfo)
	profiler.StartProfiling()

	ghsConfig := gh.Config{
		Token:        config.Opts.Token,
		Org:          config.Opts.Org,
		SyncInterval: config.Opts.SyncInterval,
		MaxVersions:  config.Opts.MaxGithubVersions,
	}
	ghs := gh.New(ghsConfig, u)

	// We want to do some stuff after github is done; this lets us block
	// on the gh repo processing before adding in other sources.
	var wg sync.WaitGroup
	wg.Add(1)

	// Add supermarket to the universe after github.
	// The 6h interval is not exposed as a tunable yet.
	conf := supermarket.Config{SyncInterval: 6 * time.Hour}
	sm := supermarket.New(conf, u)

	// Default behavior is to sync; NoSync opts out.
	if !config.Opts.NoSync {
		go func() {
			// Release the WaitGroup unconditionally: the original code
			// only called Done on success, which left wg.Wait below
			// deadlocked whenever the github sync failed. On error we
			// log and still proceed to the other sources.
			defer wg.Done()
			if err := ghs.Sync(); err != nil {
				log.Printf("Error syncing github org %s %s", ghs.Conf.Org, err.Error())
			}
		}()
	}
	go periodicSync(ghs)
	go periodicSync(sm)

	// Create our custom http server/router.
	router := httprouter.New()
	server := &http.Server{
		Addr:    fmt.Sprintf("%s:%d", config.Opts.Bind, config.Opts.Port),
		Handler: Log(router),
	}

	ghook = hook.NewServer()
	ghook.Secret = config.Opts.Secret
	ghook.Path = "/hook"

	router.GET("/", rootHandler)
	// Can see a post or refresh or purge going here as well.
	router.GET("/cookbook/:cook", u.CookBookHandler)

	// Profiler endpoints.
	router.HandlerFunc("GET", "/profiler/info.html", profiler.MemStatsHTMLHandler)
	router.HandlerFunc("GET", "/profiler/info", profiler.ProfilingInfoJSONHandler)
	router.HandlerFunc("GET", "/profiler/start", profiler.StartProfilingHandler)
	router.HandlerFunc("GET", "/profiler/stop", profiler.StopProfilingHandler)

	// Github rate info.
	router.HandlerFunc("GET", "/rate", ghs.RateHandler)
	// Throw these standard handlers in the router.
	router.HandlerFunc("GET", "/universe", u.Handler)
	router.HandlerFunc("PUT", "/hook", ghook.ServeHTTP)
	router.HandlerFunc("POST", "/hook", ghook.ServeHTTP)

	// TODO:(jnelson) the EventProcessor is tied to a gh source, but really it should be not a global event stream, but configerd per-org since the gh sources can be multi-org.
	//                Potentially we should setup a WebHook on the Source interface, so each source could have a configured hook/hook path.. Sun Sep 20 03:25:05 2015
	go ghs.EventProcessor(ghook.Events)

	if !config.Opts.NoSync {
		wg.Wait()                         // make sure we have processed everything from github before other sources
		if err := sm.Sync(); err != nil { // sync supermarket
			log.Println("Error syncing supermarket ", err)
		}
	}

	log.Printf("starting server on %s:%d\n", config.Opts.Bind, config.Opts.Port)
	if config.Opts.TLS {
		log.Fatal(server.ListenAndServeTLS(config.Opts.Cert, config.Opts.Key))
	} else {
		log.Fatal(server.ListenAndServe())
	}
}
Ejemplo n.º 5
0
// main scans a directory tree for files matching the configured regexps
// using a pool of worker goroutines, then prints directory/file counts
// and the elapsed time. Usage: prog [flags] <start-dir>.
func main() {

	flagWorker := flag.Int("workers", 8, "Number of concurrent workers")
	flagMaxSize := flag.String("max", "10MB", "Max size of files to scan")
	flagWait := flag.Int("wait", 5, "Wait delay before completion")
	//flagConfig := flag.String("config", "config.cfg", "Config file")
	flagDebug := flag.Bool("debug", false, "Debugging")
	flag.Parse()

	// Exactly one positional argument: the directory to scan.
	if flag.NArg() != 1 {
		myUsage()
		os.Exit(1)
	}

	startDir := filepath.FromSlash(flag.Arg(0))

	// Normalize the start dir to end with the OS path separator so
	// children can be joined by simple concatenation downstream.
	if !strings.HasSuffix(startDir, string(filepath.Separator)) {
		startDir += string(filepath.Separator)
	}

	// In debug mode, expose the profiler endpoints on :6060.
	if *flagDebug {
		profiler.AddMemoryProfilingHandlers()
		profiler.StartProfiling()
		go http.ListenAndServe(":6060", nil)
	}

	//loadConfig(*flagConfig)
	var err error
	config.MaxFileSize, err = ToBytes(*flagMaxSize)
	testErrDie("Error parsing max size", err)

	config.MaxThreads = *flagWorker
	config.WaitDelay = *flagWait

	loadRegexps("regexps.cfg")

	// Single report collector goroutine; closed reportChan signals it
	// to finish, and reportWg lets us wait for that.
	reportChan := make(chan FileReport)
	var reportWg sync.WaitGroup
	reportWg.Add(1)
	go createReport(reportChan, &reportWg)

	workChannel := make(chan string)
	dispChan := make(chan string, 3)

	var workerWg sync.WaitGroup
	workerWg.Add(config.MaxThreads)

	now := time.Now()
	for i := 0; i < config.MaxThreads; i++ {
		go worker(workChannel, reportChan, dispChan, &workerWg)
	}

	// The dispatcher fans directories out to the workers, seeded with
	// the start directory.
	go dispatcher(dispChan, workChannel)
	dispChan <- startDir
	workerWg.Wait()
	fmt.Println("Dirs:", nrOfDirs.nr)
	fmt.Println("Files:", nrOfFiles.nr)
	// No more reports will arrive; let the collector drain and exit.
	close(reportChan)
	reportWg.Wait()
	fmt.Println(time.Since(now))
}