// Main starts the mc application.
func Main() {
	// Enable profiling if ``MC_PROFILER`` is set. Supported modes are [cpu, mem, block].
	switch os.Getenv("MC_PROFILER") {
	case "cpu":
		defer profile.Start(profile.CPUProfile, profile.ProfilePath(mustGetProfileDir())).Stop()
	case "mem":
		defer profile.Start(profile.MemProfile, profile.ProfilePath(mustGetProfileDir())).Stop()
	case "block":
		defer profile.Start(profile.BlockProfile, profile.ProfilePath(mustGetProfileDir())).Stop()
	}

	probe.Init() // Set project's root source path.
	probe.SetAppInfo("Release-Tag", ReleaseTag)
	probe.SetAppInfo("Commit", ShortCommitID)

	app := registerApp()
	app.Before = registerBefore
	app.ExtraInfo = func() map[string]string {
		if _, e := pb.GetTerminalWidth(); e != nil {
			globalQuiet = true
		}
		if globalDebug {
			return getSystemData()
		}
		return make(map[string]string)
	}
	app.RunAndExitOnError()
}
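// Main above calls mustGetProfileDir, whose body is not shown here. A minimal
// sketch of what such a helper might look like — the ~/.mc/profile location is
// a hypothetical choice for illustration, not mc's actual path — assuming the
// standard library os, path/filepath, and log packages:
func mustGetProfileDir() string {
	home, err := os.UserHomeDir()
	if err != nil {
		log.Fatalln("Unable to determine home directory:", err) // abort: no safe place to write profiles
	}
	dir := filepath.Join(home, ".mc", "profile") // hypothetical profile directory
	if err := os.MkdirAll(dir, 0o700); err != nil {
		log.Fatalln("Unable to create profile directory:", err)
	}
	return dir
}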
func (p PkgProfile) Start() ProfilerStart {
	if *FLAGS.PROFILE_MEM {
		PROFILE = profile.Start(profile.MemProfile, profile.ProfilePath("."))
	} else {
		PROFILE = profile.Start(profile.CPUProfile, profile.ProfilePath("."))
	}
	return PROFILE
}
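// Start above presupposes package-level FLAGS and PROFILE declarations that
// are not shown. A minimal sketch of plausible definitions, assuming the
// standard flag package; the -profile-mem flag name is an assumption, only
// the field names mirror the usage above:
var (
	FLAGS = struct {
		PROFILE_MEM *bool
	}{
		PROFILE_MEM: flag.Bool("profile-mem", false, "profile memory instead of CPU"), // assumed flag name
	}
	PROFILE ProfilerStart
)

// ProfilerStart matches what profile.Start returns: anything with a Stop method.
type ProfilerStart interface {
	Stop()
}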
func Profile(mode string) Stop {
	var stop Stop
	switch mode {
	case "mem":
		stop = profileOnExit(profile.Start(profile.MemProfile, profile.ProfilePath("."), profile.NoShutdownHook))
	case "cpu":
		stop = profileOnExit(profile.Start(profile.CPUProfile, profile.ProfilePath("."), profile.NoShutdownHook))
	case "block":
		stop = profileOnExit(profile.Start(profile.BlockProfile, profile.ProfilePath("."), profile.NoShutdownHook))
	default:
		stop = stopper{}
	}
	return stop
}
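// Profile above leans on three helpers that are not shown: the Stop interface
// callers defer, the no-op stopper returned when no mode matches, and the
// profileOnExit adapter. A minimal sketch of what they might look like — the
// originals may differ, this is an assumption from the usage above:
type Stop interface {
	Stop()
}

// stopper satisfies Stop but does nothing, so callers can always write
// `defer Profile(mode).Stop()` without a nil check.
type stopper struct{}

func (stopper) Stop() {}

// profileOnExit adapts the stopper returned by profile.Start; its method set
// already satisfies Stop, so the conversion is implicit.
func profileOnExit(s interface{ Stop() }) Stop {
	return s
}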
// activateProfiling starts the requested profile mode and returns the stopper;
// the caller should defer Stop on a non-nil result so profiling spans the
// caller's lifetime rather than this helper's.
func activateProfiling(profileType string, dir string) interface{ Stop() } {
	switch profileType {
	case "cpu":
		return profile.Start(profile.ProfilePath(dir), profile.CPUProfile)
	case "mem":
		return profile.Start(profile.ProfilePath(dir), profile.MemProfile)
	case "block":
		return profile.Start(profile.ProfilePath(dir), profile.BlockProfile)
	default:
		// do nothing
		return nil
	}
}
func start(ctx *cli.Context) {
	if ctx.Bool("profile") {
		// github.com/pkg/profile allows only one active profile at a time;
		// calling Start a second time aborts the process, so enable a single
		// mode (CPU) here rather than stacking cpu/mem/block Starts.
		defer profile.Start(profile.CPUProfile, profile.ProfilePath(".")).Stop()
	}
	quit := make(chan bool)
	initLogrus(ctx)
	log.Info("Starting fullerite...")

	c, err := config.ReadConfig(ctx.String("config"))
	if err != nil {
		return
	}
	handlers := createHandlers(c)
	hook := NewLogErrorHook(handlers)
	log.Logger.Hooks.Add(hook)
	startHandlers(handlers)
	collectors := startCollectors(c)

	collectorStatChan := make(chan metric.CollectorEmission)
	internalServer := internalserver.New(c,
		handlerStatFunc(handlers),
		readCollectorStat(collectorStatChan))
	go internalServer.Run()

	readFromCollectors(collectors, handlers, collectorStatChan)
	<-quit
}
// startProfiler starts a profiler and returns its stopper. It returns nil if
// the profiler is not enabled; the caller needs to handle the nil case.
func startProfiler(profiler string) interface {
	Stop()
} {
	// Set ``MINIO_PROFILE_DIR`` to the directory where profiling information
	// should be persisted.
	profileDir := os.Getenv("MINIO_PROFILE_DIR")
	// Enable the profiler if ``MINIO_PROFILER`` is set. Supported options are [cpu, mem, block].
	switch profiler {
	case "cpu":
		return profile.Start(profile.CPUProfile, profile.NoShutdownHook, profile.ProfilePath(profileDir))
	case "mem":
		return profile.Start(profile.MemProfile, profile.NoShutdownHook, profile.ProfilePath(profileDir))
	case "block":
		return profile.Start(profile.BlockProfile, profile.NoShutdownHook, profile.ProfilePath(profileDir))
	default:
		return nil
	}
}
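// Because startProfiler returns nil when profiling is disabled, deferring Stop
// directly on its result would panic at the call site. A minimal usage sketch
// (this runServer wiring is an assumption for illustration, not part of the
// original caller):
func runServer() {
	if p := startProfiler(os.Getenv("MINIO_PROFILER")); p != nil {
		defer p.Stop() // only defer Stop when a profiler was actually started
	}
	// ... server startup ...
}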
func main() {
	p := profile.Start(profile.MemProfile, profile.ProfilePath("."), profile.NoShutdownHook)

	// initialize representation
	index, _ = iindex.IndexDirectory("/home/rhibbitts/Dropbox/Notes/WorkNotes")

	// run user interface
	ui()

	p.Stop()
}
func main() {
	p := profile.Start(
		profile.MemProfile,
		profile.ProfilePath("."))
	defer p.Stop()

	q := queue.NewDeque()
	fill(q)
	clear(q)
}
func main() {
	prof := profile.CPUProfile
	// prof := profile.MemProfile
	defer profile.Start(prof, profile.ProfilePath(".")).Stop()

	t := bptx.NewTree()
	for i := 0; i < 10000; i++ {
		x := bptx.UUID()
		t.Set(x, x)
	}
	t.Close()
}
func main() {
	// prof := profile.CPUProfile
	prof := profile.MemProfile
	defer profile.Start(prof, profile.ProfilePath(".")).Stop()

	t := bpt.NewTree()
	for i := 0; i < 1000000; i++ {
		x := []byte(fmt.Sprintf("%x", i))
		t.Set(x, x)
	}
	t.Close()
}
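// The two mains above switch between CPU and memory profiling by editing
// comments. A small sketch of selecting the mode with a flag instead; the
// -memprofile flag name and profileMode helper are assumptions for
// illustration (profile options in github.com/pkg/profile are plain
// func(*profile.Profile) values, so they can be returned and passed around):
func profileMode() func(*profile.Profile) {
	mem := flag.Bool("memprofile", false, "profile memory instead of CPU")
	flag.Parse()
	if *mem {
		return profile.MemProfile
	}
	return profile.CPUProfile
}

func main() {
	defer profile.Start(profileMode(), profile.ProfilePath(".")).Stop()
	// ... workload to profile ...
}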
func main() {
	flag.Parse()
	input := flag.Arg(0)
	output := flag.Arg(1)

	cwd, _ := os.Getwd()
	defer profile.Start(profile.ProfilePath(cwd)).Stop()

	m, _ := mapping.NewMap(input)
	r := rendering.NewRenderer(m, 5000, 5000)
	r.ClipToMap()
	r.DrawToFile(output)
}
func main() {
	p := profile.Start(
		profile.MemProfile,
		profile.ProfilePath("."),
		profile.NoShutdownHook)
	defer p.Stop()

	u := &command.Unit{"Test-01", 0, 0}
	s := command.NewCommandStack(10)
	s.Do(command.NewMoveUnitCommand(u, 10, 10))
	for i := 0; i < 10000000; i++ {
		s.Do(command.NewMoveUnitCommand(u, i*10, i*10))
	}
}
func main() {
	p := profile.Start(
		profile.MemProfile,
		profile.ProfilePath("."))
	defer p.Stop()

	q := &queue.Queue{}
	for i := 0; i < 20000000; i++ {
		// strconv.Itoa, not string(i): converting an int to string yields the
		// rune with that code point, not its decimal representation.
		n := strconv.Itoa(i)
		q.Put(n)
	}
	for i := 0; i < 20000000; i++ {
		if q.Get() == nil {
			panic("value is nil, expected *node\n")
		}
	}
}
func main() {
	kingpin.Version(version)
	kingpin.Parse()

	logging.LogStd(fmt.Sprintf("Starting firehose-to-syslog %s ", version), true)
	logging.SetupLogging(*syslogServer, *debug)

	c := cfclient.Config{
		ApiAddress:        *apiEndpoint,
		Username:          *user,
		Password:          *password,
		SkipSslValidation: *skipSSLValidation,
	}
	cfClient := cfclient.NewClient(&c)

	if len(*dopplerEndpoint) > 0 {
		cfClient.Endpoint.DopplerEndpoint = *dopplerEndpoint
	}
	logging.LogStd(fmt.Sprintf("Using %s as doppler endpoint", cfClient.Endpoint.DopplerEndpoint), true)

	logging.LogStd("Setting up event routing!", true)
	err := events.SetupEventRouting(*wantedEvents)
	if err != nil {
		// log.Fatal exits the process, so no explicit os.Exit is needed.
		log.Fatal("Error setting up event routing: ", err)
	}

	// Use bolt for in-memory / file caching
	db, err := bolt.Open(*boltDatabasePath, 0600, &bolt.Options{Timeout: 1 * time.Second})
	if err != nil {
		log.Fatal("Error opening bolt db: ", err)
	}
	defer db.Close()

	if *modeProf != "" {
		switch *modeProf {
		case "cpu":
			defer profile.Start(profile.CPUProfile, profile.ProfilePath(*pathProf)).Stop()
		case "mem":
			defer profile.Start(profile.MemProfile, profile.ProfilePath(*pathProf)).Stop()
		case "block":
			defer profile.Start(profile.BlockProfile, profile.ProfilePath(*pathProf)).Stop()
		default:
			// do nothing
		}
	}

	caching.SetCfClient(cfClient)
	caching.SetAppDb(db)
	caching.CreateBucket()

	// Let's update the database the first time
	logging.LogStd("Start filling app/space/org cache.", true)
	apps := caching.GetAllApp()
	logging.LogStd(fmt.Sprintf("Done filling cache! Found [%d] Apps", len(apps)), true)

	// Ticker polling the CC every X sec
	ccPolling := time.NewTicker(*tickerTime)
	go func() {
		for range ccPolling.C {
			apps = caching.GetAllApp()
		}
	}()

	// Parse extra fields from cmd call
	extraFields, err := extrafields.ParseExtraFields(*extraFields)
	if err != nil {
		log.Fatal("Error parsing extra fields: ", err)
	}

	if *logEventTotals {
		events.LogEventTotals(*logEventTotalsTime, *dopplerEndpoint)
	}

	if logging.Connect() || *debug {
		logging.LogStd("Connected to Syslog Server! Connecting to Firehose...", true)
		firehose := firehose.CreateFirehoseChan(cfClient.Endpoint.DopplerEndpoint, cfClient.GetToken(), *subscriptionId, *skipSSLValidation)
		if firehose != nil {
			logging.LogStd("Firehose Subscription Successful! Routing events...", true)
			events.RouteEvents(firehose, extraFields)
		} else {
			logging.LogError("Failed connecting to Firehose...Please check settings and try again!", "")
		}
	} else {
		logging.LogError("Failed connecting to the Syslog Server...Please check settings and try again!", "")
	}
}
func main() {
	procs := runtime.NumCPU()
	// packet sniffing can block an OS thread, so we need one thread
	// for that plus at least one more.
	if procs < 2 {
		procs = 2
	}
	runtime.GOMAXPROCS(procs)

	var (
		justVersion        bool
		config             mesh.Config
		networkConfig      weave.NetworkConfig
		protocolMinVersion int
		resume             bool
		ifaceName          string
		routerName         string
		nickName           string
		password           string
		pktdebug           bool
		logLevel           string
		prof               string
		bufSzMB            int
		noDiscovery        bool
		httpAddr           string
		ipamConfig         ipamConfig
		dockerAPI          string
		peers              []string
		noDNS              bool
		dnsConfig          dnsConfig
		datapathName       string
		trustedSubnetStr   string
		dbPrefix           string
		isAWSVPC           bool

		defaultDockerHost = "unix:///var/run/docker.sock"
	)

	if val := os.Getenv("DOCKER_HOST"); val != "" {
		defaultDockerHost = val
	}

	mflag.BoolVar(&justVersion, []string{"#version", "-version"}, false, "print version and exit")
	mflag.StringVar(&config.Host, []string{"-host"}, "", "router host")
	mflag.IntVar(&config.Port, []string{"#port", "-port"}, mesh.Port, "router port")
	mflag.IntVar(&protocolMinVersion, []string{"-min-protocol-version"}, mesh.ProtocolMinVersion, "minimum weave protocol version")
	mflag.BoolVar(&resume, []string{"-resume"}, false, "resume connections to previous peers")
	mflag.StringVar(&ifaceName, []string{"#iface", "-iface"}, "", "name of interface to capture/inject from (disabled if blank)")
	mflag.StringVar(&routerName, []string{"#name", "-name"}, "", "name of router (defaults to MAC of interface)")
	mflag.StringVar(&nickName, []string{"#nickname", "-nickname"}, "", "nickname of peer (defaults to hostname)")
	mflag.StringVar(&password, []string{"#password", "-password"}, "", "network password")
	mflag.StringVar(&logLevel, []string{"-log-level"}, "info", "logging level (debug, info, warning, error)")
	mflag.BoolVar(&pktdebug, []string{"#pktdebug", "#-pktdebug", "-pkt-debug"}, false, "enable per-packet debug logging")
	mflag.StringVar(&prof, []string{"#profile", "-profile"}, "", "enable profiling and write profiles to given path")
	mflag.IntVar(&config.ConnLimit, []string{"#connlimit", "#-connlimit", "-conn-limit"}, 30, "connection limit (0 for unlimited)")
	mflag.BoolVar(&noDiscovery, []string{"#nodiscovery", "#-nodiscovery", "-no-discovery"}, false, "disable peer discovery")
	mflag.IntVar(&bufSzMB, []string{"#bufsz", "-bufsz"}, 8, "capture buffer size in MB")
	mflag.StringVar(&httpAddr, []string{"#httpaddr", "#-httpaddr", "-http-addr"}, "", "address to bind HTTP interface to (disabled if blank, absolute path indicates unix domain socket)")
	mflag.StringVar(&ipamConfig.Mode, []string{"-ipalloc-init"}, "", "allocator initialisation strategy (consensus, seed or observer)")
	mflag.StringVar(&ipamConfig.IPRangeCIDR, []string{"#iprange", "#-iprange", "-ipalloc-range"}, "", "IP address range reserved for automatic allocation, in CIDR notation")
	mflag.StringVar(&ipamConfig.IPSubnetCIDR, []string{"#ipsubnet", "#-ipsubnet", "-ipalloc-default-subnet"}, "", "subnet to allocate within by default, in CIDR notation")
	mflag.IntVar(&ipamConfig.PeerCount, []string{"#initpeercount", "#-initpeercount", "-init-peer-count"}, 0, "number of peers in network (for IP address allocation)")
	mflag.StringVar(&dockerAPI, []string{"#api", "#-api", "-docker-api"}, defaultDockerHost, "Docker API endpoint")
	mflag.BoolVar(&noDNS, []string{"-no-dns"}, false, "disable DNS server")
	mflag.StringVar(&dnsConfig.Domain, []string{"-dns-domain"}, nameserver.DefaultDomain, "local domain to serve requests for")
	mflag.StringVar(&dnsConfig.ListenAddress, []string{"-dns-listen-address"}, nameserver.DefaultListenAddress, "address to listen on for DNS requests")
	mflag.IntVar(&dnsConfig.TTL, []string{"-dns-ttl"}, nameserver.DefaultTTL, "TTL for DNS request from our domain")
	mflag.DurationVar(&dnsConfig.ClientTimeout, []string{"-dns-fallback-timeout"}, nameserver.DefaultClientTimeout, "timeout for fallback DNS requests")
	mflag.StringVar(&dnsConfig.EffectiveListenAddress, []string{"-dns-effective-listen-address"}, "", "address DNS will actually be listening on, after Docker port mapping")
	mflag.StringVar(&dnsConfig.ResolvConf, []string{"-resolv-conf"}, "", "path to resolver configuration for fallback DNS lookups")
	mflag.StringVar(&datapathName, []string{"-datapath"}, "", "ODP datapath name")
	mflag.StringVar(&trustedSubnetStr, []string{"-trusted-subnets"}, "", "comma-separated list of trusted subnets in CIDR notation")
	mflag.StringVar(&dbPrefix, []string{"-db-prefix"}, "/weavedb/weave", "pathname/prefix of filename to store data")
	mflag.BoolVar(&isAWSVPC, []string{"-awsvpc"}, false, "use AWS VPC for routing")

	// crude way of detecting that we probably have been started in a
	// container, with `weave launch` --> suppress misleading paths in
	// mflag's error messages.
	if os.Args[0] == "/home/weave/weaver" { // matches the Dockerfile ENTRYPOINT
		os.Args[0] = "weave"
		mflag.CommandLine.Init("weave", mflag.ExitOnError)
	}
	mflag.Parse()

	peers = mflag.Args()
	if resume && len(peers) > 0 {
		Log.Fatalf("You must not specify an initial peer list in conjunction with --resume")
	}

	common.SetLogLevel(logLevel)

	if justVersion {
		fmt.Printf("weave router %s\n", version)
		os.Exit(0)
	}

	Log.Println("Command line options:", options())

	if prof != "" {
		defer profile.Start(profile.CPUProfile, profile.ProfilePath(prof), profile.NoShutdownHook).Stop()
	}

	if protocolMinVersion < mesh.ProtocolMinVersion || protocolMinVersion > mesh.ProtocolMaxVersion {
		Log.Fatalf("--min-protocol-version must be in range [%d,%d]", mesh.ProtocolMinVersion, mesh.ProtocolMaxVersion)
	}
	config.ProtocolMinVersion = byte(protocolMinVersion)

	if pktdebug {
		networkConfig.PacketLogging = packetLogging{}
	} else {
		networkConfig.PacketLogging = nopPacketLogging{}
	}

	overlay, bridge := createOverlay(datapathName, ifaceName, isAWSVPC, config.Host, config.Port, bufSzMB)
	networkConfig.Bridge = bridge

	name := peerName(routerName, bridge.Interface())

	if nickName == "" {
		var err error
		nickName, err = os.Hostname()
		checkFatal(err)
	}

	config.Password = determinePassword(password)
	config.TrustedSubnets = parseTrustedSubnets(trustedSubnetStr)
	config.PeerDiscovery = !noDiscovery

	if isAWSVPC && len(config.Password) > 0 {
		Log.Fatalf("--awsvpc mode is not compatible with the --password option")
	}

	db, err := db.NewBoltDB(dbPrefix + "data.db")
	checkFatal(err)
	defer db.Close()

	router := weave.NewNetworkRouter(config, networkConfig, name, nickName, overlay, db)
	Log.Println("Our name is", router.Ourself)

	if peers, err = router.InitialPeers(resume, peers); err != nil {
		Log.Fatal("Unable to get initial peer set: ", err)
	}

	var dockerCli *docker.Client
	dockerVersion := "none"
	if dockerAPI != "" {
		dc, err := docker.NewClient(dockerAPI)
		if err != nil {
			Log.Fatal("Unable to start docker client: ", err)
		} else {
			Log.Info(dc.Info())
		}
		dockerCli = dc
		dockerVersion = dockerCli.DockerVersion()
	}

	network := ""
	if isAWSVPC {
		network = "awsvpc"
	}
	checkForUpdates(dockerVersion, network)

	observeContainers := func(o docker.ContainerObserver) {
		if dockerCli != nil {
			if err := dockerCli.AddObserver(o); err != nil {
				Log.Fatal("Unable to start watcher", err)
			}
		}
	}
	isKnownPeer := func(name mesh.PeerName) bool {
		return router.Peers.Fetch(name) != nil
	}

	var (
		allocator     *ipam.Allocator
		defaultSubnet address.CIDR
		trackerName   string
	)
	if ipamConfig.Enabled() {
		var t tracker.LocalRangeTracker
		if isAWSVPC {
			Log.Infoln("Creating AWSVPC LocalRangeTracker")
			t, err = tracker.NewAWSVPCTracker()
			if err != nil {
				Log.Fatalf("Cannot create AWSVPC LocalRangeTracker: %s", err)
			}
			trackerName = "awsvpc"
		}
		allocator, defaultSubnet = createAllocator(router, ipamConfig, db, t, isKnownPeer)
		observeContainers(allocator)
		if dockerCli != nil {
			ids, err := dockerCli.AllContainerIDs()
			checkFatal(err)
			allocator.PruneOwned(ids)
		}
	}

	var (
		ns        *nameserver.Nameserver
		dnsserver *nameserver.DNSServer
	)
	if !noDNS {
		ns, dnsserver = createDNSServer(dnsConfig, router.Router, isKnownPeer)
		observeContainers(ns)
		ns.Start()
		defer ns.Stop()
		dnsserver.ActivateAndServe()
		defer dnsserver.Stop()
	}

	router.Start()
	if errors := router.InitiateConnections(peers, false); len(errors) > 0 {
		Log.Fatal(common.ErrorMessages(errors))
	}

	// The weave script always waits for a status call to succeed,
	// so there is no point in doing "weave launch --http-addr ''".
	// This is here to support stand-alone use of weaver.
	if httpAddr != "" {
		muxRouter := mux.NewRouter()
		if allocator != nil {
			allocator.HandleHTTP(muxRouter, defaultSubnet, trackerName, dockerCli)
		}
		if ns != nil {
			ns.HandleHTTP(muxRouter, dockerCli)
		}
		router.HandleHTTP(muxRouter)
		HandleHTTP(muxRouter, version, router, allocator, defaultSubnet, ns, dnsserver)
		http.Handle("/", common.LoggingHTTPHandler(muxRouter))
		Log.Println("Listening for HTTP control messages on", httpAddr)
		go listenAndServeHTTP(httpAddr)
	}

	common.SignalHandlerLoop(router)
}
func main() {
	defer func() {
		if r := recover(); r != nil {
			// fmt.Fprint, not Fprintf: the formatted error may contain % verbs.
			fmt.Fprint(os.Stderr, string(sf.FormatError(fmt.Errorf("%s : %s", r, debug.Stack()))))
		}
	}()

	if version.Show() {
		fmt.Fprintf(os.Stdout, "%s\n", version.String())
		return
	}

	// Enable profiling if mode is set
	switch options.profiling {
	case "cpu":
		defer profile.Start(profile.CPUProfile, profile.ProfilePath("."), profile.Quiet).Stop()
	case "mem":
		defer profile.Start(profile.MemProfile, profile.ProfilePath("."), profile.Quiet).Stop()
	case "block":
		defer profile.Start(profile.BlockProfile, profile.ProfilePath("."), profile.Quiet).Stop()
	default:
		// do nothing
	}

	// Register our custom Error hook
	log.AddHook(NewErrorHook(os.Stderr))

	// Enable runtime tracing if tracing is true
	if options.tracing {
		tracing, err := os.Create(time.Now().Format("2006-01-02T150405.pprof"))
		if err != nil {
			log.Fatalf("Failed to create tracing logfile: %s", err)
		}
		defer tracing.Close()

		if err := trace.Start(tracing); err != nil {
			log.Fatalf("Failed to start tracing: %s", err)
		}
		defer trace.Stop()
	}

	// Open the log file
	f, err := os.OpenFile(options.logfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		log.Fatalf("Failed to open the logfile %s: %s", options.logfile, err)
	}
	defer f.Close()

	// Initialize logger with default TextFormatter
	log.SetFormatter(&log.TextFormatter{DisableColors: true, FullTimestamp: true})
	// Set the log level
	if options.debug {
		log.SetLevel(log.DebugLevel)
	}
	// SetOutput to log file and/or stdout
	log.SetOutput(f)
	if options.stdout {
		log.SetOutput(io.MultiWriter(os.Stdout, f))
	}

	// Parse the -reference parameter
	if err = ParseReference(); err != nil {
		log.Fatalf(err.Error())
	}

	// Host is either the host's UUID (if run on vsphere) or the hostname of
	// the system (if run standalone)
	host, err := sys.UUID()
	if host != "" {
		log.Infof("Using UUID (%s) for imagestore name", host)
	} else if options.standalone {
		host, err = os.Hostname()
		log.Infof("Using host (%s) for imagestore name", host)
	}
	if err != nil {
		log.Fatalf("Failed to return the UUID or host name: %s", err)
	}

	if !options.standalone {
		log.Debugf("Running with portlayer")

		// Ping the server to ensure it's at least running
		ok, err := PingPortLayer()
		if err != nil || !ok {
			log.Fatalf("Failed to ping portlayer: %s", err)
		}
	} else {
		log.Debugf("Running standalone")
	}

	// Calculate (and overwrite) the registry URL and make sure that it responds to requests
	options.registry, err = LearnRegistryURL(options)
	if err != nil {
		log.Fatalf("Error while pulling image: %s", err)
	}

	// Get the URL of the OAuth endpoint
	url, err := LearnAuthURL(options)
	if err != nil {
		log.Fatalf("Failed to obtain OAuth endpoint: %s", err)
	}

	// Get the OAuth token, if we have a URL
	if url != nil {
		token, err := FetchToken(url)
		if err != nil {
			log.Fatalf("Failed to fetch OAuth token: %s", err)
		}
		options.token = token
	}

	// HACK: Required to learn the name of the vmdk from given reference
	// Used by docker personality until metadata support lands
	if !options.resolv {
		progress.Message(po, "", "Pulling from "+options.image)
	}

	// Get the manifest
	manifest, err := FetchImageManifest(options)
	if err != nil {
		if strings.Contains(err.Error(), "image not found") {
			log.Fatalf("Error: image %s not found", options.image)
		} else {
			log.Fatalf("Error while pulling image manifest: %s", err)
		}
	}

	// Create the ImageWithMeta slice to hold Image structs
	images, imageLayer, err := ImagesToDownload(manifest, host)
	if err != nil {
		log.Fatalf(err.Error())
	}

	// HACK: Required to learn the name of the vmdk from given reference
	// Used by docker personality until metadata support lands
	if options.resolv {
		if len(images) > 0 {
			fmt.Printf("%s", images[0].meta)
			os.Exit(0)
		}
		os.Exit(1)
	}

	// Fetch the blobs from registry
	if err := DownloadImageBlobs(images); err != nil {
		log.Fatalf(err.Error())
	}

	if err := CreateImageConfig(images, manifest); err != nil {
		log.Fatalf(err.Error())
	}

	// Write blobs to the storage layer
	if err := WriteImageBlobs(images); err != nil {
		log.Fatalf(err.Error())
	}

	if err := updateImageMetadata(imageLayer, manifest); err != nil {
		log.Fatalf(err.Error())
	}

	progress.Message(po, "", "Digest: "+manifest.Digest)
	if len(images) > 0 {
		progress.Message(po, "", "Status: Downloaded newer image for "+options.image+":"+options.tag)
	} else {
		progress.Message(po, "", "Status: Image is up to date for "+options.image+":"+options.tag)
	}
}
func main() {
	players, profilingOn, numWorkers := parseCommandLine()
	startTime := time.Now()
	if len(players) == 0 {
		panic("Could not find players")
	}

	// Start profiler
	if profilingOn {
		newLog.Info("Running profiler")
		defer profile.Start(profile.CPUProfile, profile.ProfilePath(".")).Stop()
	}

	// Create random Parent solutions to start
	parentSolutions := make([]Solution, numParents)
	for i := range parentSolutions {
		ourPlayers := make([]Player, len(players))
		copy(ourPlayers, players)
		randomizeTeams(ourPlayers)
		solutionScore, _ := ScoreSolution(ourPlayers)
		parentSolutions[i] = Solution{ourPlayers, solutionScore}
	}

	// Use the random starting solutions to determine the worst case for each of
	// our criteria
	PopulateWorstCases(parentSolutions)

	// Start our worker goroutines
	tasks := make(chan workerTask, numSolutionsPerRun)
	results := make(chan Solution, numSolutionsPerRun)
	for i := 0; i < numWorkers; i++ {
		go worker(tasks, results)
	}
	defer close(tasks)

	// Allow user to signal exit
	doneSignal := make(chan os.Signal, 1)
	signal.Notify(doneSignal, syscall.SIGINT)

	topScore := parentSolutions[0].score
	numRunsCompleted := 0
	topScoreRunNumber := 0
	for {
		// If we have a new best score, save and print it!
		if topScore != parentSolutions[0].score {
			topScore = parentSolutions[0].score
			topScoreRunNumber = numRunsCompleted
			if newLog.IsEnabledFor(logging.DEBUG) && numRunsCompleted > 20 {
				newLog.Info("\nNew top score! Run number %d. Score: %.02f", numRunsCompleted, topScore)
				PrintTeams(parentSolutions[0])
				PrintSolutionScoring(parentSolutions[0])
			}
		}

		// Create new solutions, and save the best ones
		newSolutions := performRun(parentSolutions, tasks, results)
		sort.Sort(ByScore(newSolutions))
		for i := range parentSolutions {
			parentSolutions[i] = newSolutions[i]
		}

		numRunsCompleted++
		if timeToClose(numRunsCompleted, topScoreRunNumber, doneSignal) {
			break
		}
	}

	// Display our solution to the user
	topSolution := parentSolutions[0]
	fmt.Printf("Exiting after %d runs. Top score was found on run #%d\n", numRunsCompleted, topScoreRunNumber)
	PrintTeams(topSolution)
	PrintSolutionScoring(topSolution)
	newLog.Debug("Program runtime: %.02fs", time.Since(startTime).Seconds())
}
func ExampleProfilePath() {
	// set the location that the profile will be written to; Stop must be
	// chained so the deferred call ends profiling rather than starting it
	defer profile.Start(profile.ProfilePath(os.Getenv("HOME"))).Stop()
}
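// A companion sketch combining the options seen throughout this file: pick the
// profile kind, choose where the output lands, and suppress the package's
// shutdown hook so the caller alone decides when Stop runs. This example is
// illustrative, not one of the package's own:
func ExampleStart_combinedOptions() {
	p := profile.Start(
		profile.MemProfile,
		profile.ProfilePath(os.Getenv("HOME")),
		profile.NoShutdownHook)
	defer p.Stop()
	// ... workload to be profiled ...
}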
func main() {
	defer profile.Start(profile.CPUProfile, profile.ProfilePath(".")).Stop()

	logger := log15.Root()
	logger.SetHandler(log15.MultiHandler(
		log15.LvlFilterHandler(
			log15.LvlInfo,
			log15.Must.FileHandler(filepath.Join(dir, "crawler.log"), log15.LogfmtFormat()),
		),
		log15.LvlFilterHandler(log15.LvlError, log15.StdoutHandler),
	))

	csv, err := os.Open(seedfile)
	if err != nil {
		log.Fatal(err)
	}
	var urls []string
	scanner := bufio.NewScanner(csv)
	for scanner.Scan() {
		url := scanner.Text()
		if !strings.HasPrefix(url, "#") {
			urls = append(urls, url)
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	csv.Close()

	pattern := &extract.Pattern{
		File: []string{
			"", "*.?htm?", `/[^\.]*/`,
			`/.*\.(jpg|JPG|png|PNG|jpeg|JPEG|gif|GIF)/`,
			`/.*\.(php|jsp|aspx|asp|cgi|do)/`,
			"*.css", "*.js",
		},
		// ExcludeFile: []string{
		// 	"*.doc?", "*.xls?", "*.ppt?",
		// 	"*.pdf", "*.rar", "*.zip",
		// 	"*.ico", "*.apk", "*.exe",
		// 	"*.mp4", "*.mkv",
		// },
	}
	ctrl = &Controller{
		extractor: &extract.Extractor{
			Matcher:  extract.MustCompile(pattern),
			MaxDepth: 4,
			Pos: []struct{ Tag, Attr string }{
				{"a", "href"}, {"img", "src"}, {"link", "href"}, {"script", "src"},
			},
			SniffFlags: extract.SniffWindowLocation,
			Redirect:   true,
			SpanHosts:  true,
			SubDomain:  true,
			ResolveIP:  true,
		},
		downloader: &download.Downloader{
			Dir: dir,
		},
		trie:        urltrie.NewHosts(threshold),
		count:       count.NewHosts(),
		fingerprint: fingerprint.NewStore(0, 4, 4096),
		limiter:     ratelimit.New(rate),
		logger:      logger.New("worker", "controller"),
	}
	ctrl.complete.hosts = make(map[string]bool)

	store, err := boltstore.New(filepath.Join(dir, "bolt.db"), nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	// queue, err := diskqueue.NewDefault(store.DB)
	queue := ratelimitq.NewWaitQueue(&ratelimitq.Option{
		Limit:     ctrl.Interval,
		Secondary: diskheap.New(store.DB, []byte("HEAP"), 16),
	})

	go func() {
		http.Handle("/count/", http.HandlerFunc(handleCount))
		log.Fatal(http.ListenAndServe("localhost:7869", nil))
	}()

	cw := crawler.New(&crawler.Config{
		Controller: ctrl,
		Logger:     logger,
		Store:      store,
		Queue:      queue,
	})
	if err := cw.Crawl(urls[offset-1 : offset-1+nseed]...); err != nil {
		log.Fatal(err)
	}
	cw.Wait()
}