func main() {

	var data = flag.String("data", "", "The data directory where WOF data lives, required")
	var cache_size = flag.Int("cache_size", 1024, "The number of WOF records with large geometries to cache")
	var cache_trigger = flag.Int("cache_trigger", 2000, "The minimum number of coordinates in a WOF record that will trigger caching")

	var lat = flag.Float64("latitude", 0.0, "The latitude of the point to query")
	var lon = flag.Float64("longitude", 0.0, "The longitude of the point to query")

	var loglevel = flag.String("loglevel", "info", "Log level for reporting")

	flag.Parse()

	logger := log.NewWOFLogger("[wof-breaches] ")
	logger.AddLogger(os.Stdout, *loglevel)

	idx, err := rtree.NewIndex(*data, *cache_size, *cache_trigger, logger)

	if err != nil {
		logger.Fatal("failed to create rtree index, because %v", err)
	}

	for _, path := range flag.Args() {

		logger.Info("indexing %s", path)
		idx.IndexGeoJSONFile(path)
	}
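
	// Query the index for everything intersecting this point (presumably a
	// bounding-box check against the R-tree) and inflate the raw spatial
	// results into a printable form.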

	results := idx.GetIntersectsByLatLon(*lat, *lon)
	inflated := idx.InflateSpatialResults(results)

	for _, r := range inflated {
		logger.Info("%v", r)
	}
}
Example #2
func NewPointInPolygonSimple(source string) (*WOFPointInPolygon, error) {

	cache_size := 100
	cache_trigger := 1000

	logger := log.NewWOFLogger("[wof-pip-server]")
	logger.AddLogger(os.Stdout, "debug")

	return NewPointInPolygon(source, cache_size, cache_trigger, logger)
}
func main() {

	var data = flag.String("data", "", "The data directory where WOF data lives, required")
	var cache_size = flag.Int("cache_size", 1024, "The number of WOF records with large geometries to cache")
	var cache_trigger = flag.Int("cache_trigger", 2000, "The minimum number of coordinates in a WOF record that will trigger caching")
	var loglevel = flag.String("loglevel", "debug", "Log level for reporting")
	var subject_id = flag.Int("subject", 0, "The WOF ID of the subject record (the record that is indexed)")
	var clipping_id = flag.Int("clipping", 0, "The WOF ID of the clipping record (the record tested against the subject)")

	flag.Parse()

	logger := log.NewWOFLogger("[wof-breaches] ")
	logger.AddLogger(os.Stdout, *loglevel)

	idx, err := breaches.NewIndex(*data, *cache_size, *cache_trigger, logger)

	if err != nil {
		logger.Fatal("failed to create breaches index, because %v", err)
	}

	subject_path := utils.Id2AbsPath(*data, *subject_id)
	clipping_path := utils.Id2AbsPath(*data, *clipping_id)
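
	// Only the subject is added to the index, so idx.Breaches(clipping) below
	// answers a single question: does the clipping record's geometry breach
	// the subject's?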

	idx.IndexGeoJSONFile(subject_path)

	subject, _ := idx.LoadGeoJSON(subject_path)
	clipping, _ := idx.LoadGeoJSON(clipping_path)

	logger.Info("does %s (clipping) breach %s (subject)", clipping.WOFName(), subject.WOFName())

	t1 := time.Now()

	results, _ := idx.Breaches(clipping)

	if len(results) == 0 {
		logger.Info("%s (clipping) DOES NOT breach %s (subject)", clipping.WOFName(), subject.WOFName())
	}

	for _, r := range results {

		subject_path := utils.Id2AbsPath(*data, r.Id)
		subject_feature, _ := idx.LoadGeoJSON(subject_path)

		logger.Info("%s (%d) breaches %s (%d)", clipping.WOFName(), clipping.WOFId(), subject_feature.WOFName(), subject_feature.WOFId())
	}

	t2 := time.Since(t1)
	logger.Info("time to search %v", t2)

}
func main() {

	var port = flag.Int("port", 8080, "Port to listen")
	var min = flag.Int("min", 5, "The minimum number of Brooklyn Integers to keep on hand at all times")
	var loglevel = flag.String("loglevel", "info", "Log level")
	var cors = flag.Bool("cors", false, "Enable CORS headers")

	flag.Parse()

	writer := io.MultiWriter(os.Stdout)

	logger := log.NewWOFLogger("[big-integer] ")
	logger.AddLogger(writer, *loglevel)

	proxy := NewProxy(int64(*min), logger)
	proxy.Init()
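
	// The proxy keeps at least -min Brooklyn Integers on hand at all times and
	// the handler below hands one out per request as plain text. A minimal
	// usage sketch, assuming the default port: GET http://localhost:8080/
	// returns a single integer in the response body.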

	handler := func(rsp http.ResponseWriter, r *http.Request) {

		i, err := proxy.Integer()

		if err != nil {
			msg := fmt.Sprintf("Failed to retrieve integer because %v", err)
			http.Error(rsp, msg, http.StatusInternalServerError)
			return
		}

		if *cors {
			rsp.Header().Set("Access-Control-Allow-Origin", "*")
		}

		io.WriteString(rsp, strconv.Itoa(int(i)))
	}

	http.HandleFunc("/", handler)

	str_port := ":" + strconv.Itoa(*port)
	err := http.ListenAndServe(str_port, nil)

	if err != nil {
		logger.Fatal("Failed to start server, because %v\n", err)
	}

}
func main() {

	var host = flag.String("host", "localhost", "The hostname to listen for requests on")
	var port = flag.Int("port", 8080, "The port number to listen for requests on")
	var data = flag.String("data", "", "The data directory where WOF data lives, required")
	var cache_size = flag.Int("cache_size", 1024, "The number of WOF records with large geometries to cache")
	var cache_trigger = flag.Int("cache_trigger", 2000, "The minimum number of coordinates in a WOF record that will trigger caching")
	var strict = flag.Bool("strict", false, "Enable strict placetype checking")
	var logs = flag.String("logs", "", "Where to write logs to disk")
	var metrics = flag.String("metrics", "", "Where to write (@rcrowley go-metrics style) metrics to disk")
	var format = flag.String("metrics-as", "plain", "The format to write metrics in. Valid options are \"json\" and \"plain\"")
	var cors = flag.Bool("cors", false, "Enable CORS headers")
	var verbose = flag.Bool("verbose", false, "Enable verbose logging, or log level \"info\"")
	var verboser = flag.Bool("verboser", false, "Enable really verbose logging, or log level \"debug\"")

	flag.Parse()
	args := flag.Args()

	if *data == "" {
		panic("missing data")
	}

	_, err := os.Stat(*data)

	if os.IsNotExist(err) {
		panic("data does not exist")
	}

	loglevel := "status"

	if *verbose {
		loglevel = "info"
	}

	if *verboser {
		loglevel = "debug"
	}

	var l_writer io.Writer
	var m_writer io.Writer

	l_writer = io.MultiWriter(os.Stdout)

	if *logs != "" {

		l_file, l_err := os.OpenFile(*logs, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0660)

		if l_err != nil {
			panic(l_err)
		}

		l_writer = io.MultiWriter(os.Stdout, l_file)
	}

	logger := log.NewWOFLogger("[wof-pip-server] ")
	logger.AddLogger(l_writer, loglevel)

	p, p_err := pip.NewPointInPolygon(*data, *cache_size, *cache_trigger, logger)

	if p_err != nil {
		panic(p_err)
	}

	if *metrics != "" {

		m_file, m_err := os.OpenFile(*metrics, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0660)

		if m_err != nil {
			panic(m_err)
		}

		m_writer = io.MultiWriter(m_file)
		_ = p.SendMetricsTo(m_writer, 60e9, *format)
	}

	t1 := time.Now()

	for _, path := range args {
		p.IndexMetaFile(path)
	}

	t2 := float64(time.Since(t1)) / 1e9

	p.Logger.Status("indexed %d records in %.3f seconds", p.Rtree.Size(), t2)

	for pt, count := range p.Placetypes {
		p.Logger.Status("indexed %s: %d", pt, count)
	}
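
	// A minimal sketch of the API this handler implements: GET
	// /?latitude=<lat>&longitude=<lon>[&placetype=<placetype>] reverse geocodes
	// the point and returns the matching WOF records as a JSON array.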

	handler := func(rsp http.ResponseWriter, req *http.Request) {

		query := req.URL.Query()

		str_lat := query.Get("latitude")
		str_lon := query.Get("longitude")
		placetype := query.Get("placetype")

		if str_lat == "" {
			http.Error(rsp, "Missing latitude parameter", http.StatusBadRequest)
			return
		}

		if str_lon == "" {
			http.Error(rsp, "Missing longitude parameter", http.StatusBadRequest)
			return
		}

		lat, lat_err := strconv.ParseFloat(str_lat, 64)
		lon, lon_err := strconv.ParseFloat(str_lon, 64)

		if lat_err != nil {
			http.Error(rsp, "Invalid latitude parameter", http.StatusBadRequest)
			return
		}

		if lon_err != nil {
			http.Error(rsp, "Invalid longitude parameter", http.StatusBadRequest)
			return
		}

		if lat > 90.0 || lat < -90.0 {
			http.Error(rsp, "E_IMPOSSIBLE_LATITUDE", http.StatusBadRequest)
			return
		}

		if lon > 180.0 || lon < -180.0 {
			http.Error(rsp, "E_IMPOSSIBLE_LONGITUDE", http.StatusBadRequest)
			return
		}

		if placetype != "" {

			if *strict && !p.IsKnownPlacetype(placetype) {
				http.Error(rsp, "Unknown placetype", http.StatusBadRequest)
				return
			}
		}

		results, timings := p.GetByLatLonForPlacetype(lat, lon, placetype)

		count := len(results)
		ttp := 0.0

		for _, t := range timings {
			ttp += t.Duration
		}

		if placetype != "" {
			p.Logger.Debug("time to reverse geocode %f, %f @%s: %d results in %f seconds ", lat, lon, placetype, count, ttp)
		} else {
			p.Logger.Debug("time to reverse geocode %f, %f: %d results in %f seconds ", lat, lon, count, ttp)
		}

		js, err := json.Marshal(results)

		if err != nil {
			http.Error(rsp, err.Error(), http.StatusInternalServerError)
			return
		}

		// maybe this although it seems like it adds functionality for a lot of
		// features this server does not need - https://github.com/rs/cors
		// (20151022/thisisaaronland)

		if *cors {
			rsp.Header().Set("Access-Control-Allow-Origin", "*")
		}

		rsp.Header().Set("Content-Type", "application/json")
		rsp.Write(js)
	}

	endpoint := fmt.Sprintf("%s:%d", *host, *port)

	http.HandleFunc("/", handler)
	err = http.ListenAndServe(endpoint, nil)

	if err != nil {
		logger.Fatal("failed to start server, because %v", err)
	}
}
func main() {

	var root = flag.String("root", "", "The directory where your Who's On First data is stored")
	var bucket = flag.String("bucket", "", "The S3 bucket to sync <root> to")
	var prefix = flag.String("prefix", "", "A prefix inside your S3 bucket where things go")
	var list = flag.String("file-list", "", "A single file containing a list of files to sync")
	var debug = flag.Bool("debug", false, "Be chatty")
	var dryrun = flag.Bool("dryrun", false, "Go through the motions but don't actually clone anything")
	var tidy = flag.Bool("tidy", false, "Remove -file-list file, if present")
	var credentials = flag.String("credentials", "", "Your S3 credentials file")
	var procs = flag.Int("processes", (runtime.NumCPU() * 2), "The number of concurrent processes to sync data with")
	var loglevel = flag.String("loglevel", "info", "Log level for reporting")
	var slack = flag.Bool("slack", false, "Send status updates to Slack (via slackcat)")
	var slack_config = flag.String("slack-config", "", "The path to your slackcat config")

	flag.Parse()

	if *root == "" {
		panic("missing root")
	}

	_, err := os.Stat(*root)

	if os.IsNotExist(err) {
		panic("root does not exist")
	}

	if *bucket == "" {
		panic("missing bucket")
	}

	if *credentials != "" {
		os.Setenv("AWS_CREDENTIAL_FILE", *credentials)
	}

	if *debug || *dryrun {
		*loglevel = "debug"
	}

	logger := log.NewWOFLogger("[wof-sync-files] ")

	writer := io.MultiWriter(os.Stdout)
	logger.AddLogger(writer, *loglevel)

	s := s3.WOFSync(*bucket, *prefix, *procs, *debug, logger)
	s.MonitorStatus()

	if *dryrun {
		s.Dryrun = true
	}

	if *list == "" {
		args := flag.Args()
		s.SyncFiles(args, *root)
	} else {

		_, err := os.Stat(*list)

		if os.IsNotExist(err) {
			panic(err)
		}

		s.SyncFileList(*list, *root)

		if !*debug && *tidy {
			os.Remove(*list)
		}
	}

	if *slack {

		sl, err := slackcat.NewWriter(*slack_config)

		if err != nil {
			logger.Warning("failed to create slackcat writer, because %v", err)
		} else {

			logger.AddLogger(sl, "status")

			logger.Status(s.StatusReport())
			logger.Status("Time to process %v", s.TimeToProcess)
		}
	}
}
func main() {

	/*
	   This still lacks metrics collection and strict placetype checking
	   (20151203/thisisaaronland)
	*/

	var port = flag.Int("port", 9988, "The port number to listen for requests on")
	var data = flag.String("data", "", "The data directory where WOF data lives, required")
	var cache_size = flag.Int("cache_size", 1024, "The number of WOF records with large geometries to cache")
	var cache_trigger = flag.Int("cache_trigger", 2000, "The minimum number of coordinates in a WOF record that will trigger caching")
	// var strict = flag.Bool("strict", false, "Enable strict placetype checking")
	var logs = flag.String("logs", "", "Where to write logs to disk")
	// var metrics = flag.String("metrics", "", "Where to write (@rcrowley go-metrics style) metrics to disk")
	// var format = flag.String("metrics-as", "plain", "Format metrics as... ? Valid options are \"json\" and \"plain\"")
	var cors = flag.Bool("cors", false, "Enable CORS headers")
	var loglevel = flag.String("loglevel", "info", "Log level for reporting")

	flag.Parse()
	args := flag.Args()

	if *data == "" {
		panic("missing data")
	}

	_, err := os.Stat(*data)

	if os.IsNotExist(err) {
		panic("data does not exist")
	}

	l_writer := io.MultiWriter(os.Stdout)

	if *logs != "" {

		l_file, l_err := os.OpenFile(*logs, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0660)

		if l_err != nil {
			panic(l_err)
		}

		l_writer = io.MultiWriter(os.Stdout, l_file)
	}

	logger := log.NewWOFLogger("[wof-breach-server] ")
	logger.AddLogger(l_writer, *loglevel)

	idx, err := breaches.NewIndex(*data, *cache_size, *cache_trigger, logger)

	if err != nil {
		logger.Fatal("failed to create breaches index, because %v", err)
	}

	for _, meta := range args {
		logger.Info("indexing %s", meta)
		idx.IndexMetaFile(meta)
	}

	logger.Info("indexing meta files complete")

	// lookups counts the number of breach lookups currently in flight
	var lookups int64
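
	// A minimal sketch of the API this handler implements: GET /?wofid=<WOF ID>
	// loads that record from the data directory, asks the index which records
	// it breaches, and returns the results as a JSON array.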

	handler := func(rsp http.ResponseWriter, req *http.Request) {

		query := req.URL.Query()

		str_wofid := query.Get("wofid")

		if str_wofid == "" {
			http.Error(rsp, "Missing wofid parameter", http.StatusBadRequest)
			return
		}

		wofid, err := strconv.Atoi(str_wofid)

		if err != nil {
			http.Error(rsp, "Unable to parse WOF ID", http.StatusBadRequest)
			return
		}

		path := utils.Id2AbsPath(*data, wofid)
		feature, err := idx.LoadGeoJSON(path)

		if err != nil {
			http.Error(rsp, "Invalid WOF ID", http.StatusBadRequest)
			return
		}

		logger.Info("looking up %d (%d lookups in flight)", feature.WOFId(), atomic.LoadInt64(&lookups))

		t1 := time.Now()
		atomic.AddInt64(&lookups, 1)

		results, err := idx.Breaches(feature)

		atomic.AddInt64(&lookups, -1)
		t2 := time.Since(t1)

		logger.Info("time to lookup %d : %v", feature.WOFId(), t2)

		if err != nil {
			http.Error(rsp, err.Error(), http.StatusInternalServerError)
			return
		}

		logger.Info("%d results breach %d", len(results), feature.WOFId())

		js, err := json.Marshal(results)

		if err != nil {
			http.Error(rsp, err.Error(), http.StatusInternalServerError)
			return
		}

		if *cors {
			rsp.Header().Set("Access-Control-Allow-Origin", "*")
		}

		rsp.Header().Set("Content-Type", "application/json")
		rsp.Write(js)
	}

	str_port := fmt.Sprintf(":%d", *port)

	http.HandleFunc("/", handler)
	err = http.ListenAndServe(str_port, nil)

	if err != nil {
		logger.Fatal("failed to start server, because %v", err)
	}
}
func main() {

	var data = flag.String("data", "", "The data directory where WOF data lives, required")
	var cache_size = flag.Int("cache_size", 1024, "The number of WOF records with large geometries to cache")
	var cache_trigger = flag.Int("cache_trigger", 2000, "The minimum number of coordinates in a WOF record that will trigger caching")
	var loglevel = flag.String("loglevel", "info", "Log level for reporting")

	flag.Parse()
	args := flag.Args()

	logger := log.NewWOFLogger("[wof-breaches] ")
	logger.AddLogger(os.Stdout, *loglevel)

	idx, err := breaches.NewIndex(*data, *cache_size, *cache_trigger, logger)

	if err != nil {
		logger.Fatal("failed to create breaches index, because %v", err)
	}

	for _, meta := range args {
		logger.Info("indexing %s", meta)
		idx.IndexMetaFile(meta)
	}

	logger.Info("indexing meta files complete")
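
	// Read WOF IDs from stdin, one per line; for each ID resolve its GeoJSON
	// path under the data directory, load the record and report which of the
	// indexed records it breaches.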

	scanner := bufio.NewScanner(os.Stdin)

	for scanner.Scan() {

		id := scanner.Text()

		if id == "" {
			continue
		}

		wofid, err := strconv.Atoi(id)

		if err != nil {
			logger.Error("failed to convert %s to a WOF ID, because %v", id, err)
			continue
		}

		path := utils.Id2AbsPath(*data, wofid)
		feature, err := idx.LoadGeoJSON(path)

		if err != nil {
			logger.Error("Failed to read %s, because %v", path, err)
			continue
		}

		// Note the WOF ID handling below - waiting for this issue to be
		// resolved (20151125/thisisaaronland)
		// https://github.com/whosonfirst/go-whosonfirst-geojson/issues/2

		logger.Info("what records does %s (%s) breach?", feature.WOFName(), path)

		t1 := time.Now()

		results, err := idx.Breaches(feature)

		if err != nil {
			logger.Error("Failed to determine breaches for %s (%d), because %v", feature.WOFName(), feature.WOFId(), err)
			continue
		}

		count := len(results)

		if count == 0 {
			logger.Status("%s does not breach any other records in the index", feature.WOFName())
		} else if count == 1 {
			logger.Status("%s breaches one other record in the index", feature.WOFName())
		} else {
			logger.Status("%s breaches %d other records in the index", feature.WOFName(), count)
		}

		for _, r := range results {

			other_path := utils.Id2AbsPath(*data, r.Id)
			other_feature, _ := idx.LoadGeoJSON(other_path)

			logger.Info("%s (%d) breaches %s (%d)", feature.WOFName(), feature.WOFId(), other_feature.WOFName(), other_feature.WOFId())
		}

		t2 := time.Since(t1)
		logger.Info("time to search %v", t2)
	}

}
func main() {

	var host = flag.String("host", "localhost", "The hostname to listen for requests on")
	var port = flag.Int("port", 8080, "The port number to listen for requests on")
	var data = flag.String("data", "", "The data directory where WOF data lives, required")
	var cache_all = flag.Bool("cache_all", false, "Just cache everything, regardless of size")
	var cache_size = flag.Int("cache_size", 1024, "The number of WOF records with large geometries to cache")
	var cache_trigger = flag.Int("cache_trigger", 2000, "The minimum number of coordinates in a WOF record that will trigger caching")
	var strict = flag.Bool("strict", false, "Enable strict placetype checking")
	var loglevel = flag.String("loglevel", "info", "Log level for reporting")
	var logs = flag.String("logs", "", "Where to write logs to disk")
	var metrics = flag.String("metrics", "", "Where to write (@rcrowley go-metrics style) metrics to disk")
	var format = flag.String("metrics-as", "plain", "The format to write metrics in. Valid options are \"json\" and \"plain\"")
	var cors = flag.Bool("cors", false, "Enable CORS headers")
	var procs = flag.Int("procs", (runtime.NumCPU() * 2), "The number of concurrent processes to clone data with")
	var pidfile = flag.String("pidfile", "", "Where to write a PID file for wof-pip-server. If empty the PID file will be written to wof-pip-server.pid in the current directory")
	var nopid = flag.Bool("nopid", false, "Do not try to write a PID file")

	flag.Parse()
	args := flag.Args()

	if *data == "" {
		panic("missing data")
	}

	_, err := os.Stat(*data)

	if os.IsNotExist(err) {
		panic("data does not exist")
	}

	runtime.GOMAXPROCS(*procs)

	var l_writer io.Writer
	var m_writer io.Writer

	l_writer = io.MultiWriter(os.Stdout)

	if *logs != "" {

		l_file, l_err := os.OpenFile(*logs, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0660)

		if l_err != nil {
			panic(l_err)
		}

		l_writer = io.MultiWriter(os.Stdout, l_file)
	}

	logger := log.NewWOFLogger("[wof-pip-server] ")
	logger.AddLogger(l_writer, *loglevel)

	if *cache_all {

		*cache_size = 0
		*cache_trigger = 1
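
		// Size the cache by counting the lines in each meta file below (roughly
		// one record per line) so that, with cache_trigger set to 1, every
		// record ends up being cached.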

		mu := new(sync.Mutex)
		wg := new(sync.WaitGroup)

		for _, path := range args {

			wg.Add(1)

			go func(path string) {
				defer wg.Done()

				count := 0

				fh, err := os.Open(path)

				if err != nil {
					logger.Error("failed to open %s for reading, because %v", path, err)
					os.Exit(1)
				}

				scanner := bufio.NewScanner(fh)

				for scanner.Scan() {
					count += 1
				}

				mu.Lock()
				*cache_size += count
				mu.Unlock()

			}(path)
		}

		wg.Wait()

		logger.Status("set cache_size to %d and cache_trigger to %d", *cache_size, *cache_trigger)
	}

	p, p_err := pip.NewPointInPolygon(*data, *cache_size, *cache_trigger, logger)

	if p_err != nil {
		panic(p_err)
	}

	if *metrics != "" {

		m_file, m_err := os.OpenFile(*metrics, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0660)

		if m_err != nil {
			panic(m_err)
		}

		m_writer = io.MultiWriter(m_file)
		_ = p.SendMetricsTo(m_writer, 60e9, *format)
	}
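
	// Until the meta files have been indexed the HTTP handler returns a 503; a
	// goroutine does the indexing, optionally writes a PID file (removed again
	// on SIGINT/SIGTERM) and then clears the indexing flag via the channel.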

	var indexing int32 = 1 // read concurrently by the handler, so use sync/atomic
	ch := make(chan bool)

	go func() {
		<-ch
		atomic.StoreInt32(&indexing, 0)
	}()

	go func() {

		if *nopid {

			t1 := time.Now()

			for _, path := range args {
				p.Logger.Status("indexing %s", path)
				p.IndexMetaFile(path)
			}

			t2 := float64(time.Since(t1)) / 1e9
			p.Logger.Status("indexed %d records in %.3f seconds", p.Rtree.Size(), t2)

			ch <- true
			return
		}

		if *pidfile == "" {

			cwd, err := os.Getwd()

			if err != nil {
				panic(err)
			}

			fname := fmt.Sprintf("%s.pid", filepath.Base(os.Args[0]))

			*pidfile = filepath.Join(cwd, fname)
		}

		fh, err := os.Create(*pidfile)

		if err != nil {
			panic(err)
		}

		defer fh.Close()

		t1 := time.Now()

		for _, path := range args {
			p.Logger.Status("indexing %s", path)
			p.IndexMetaFile(path)
		}

		t2 := float64(time.Since(t1)) / 1e9
		p.Logger.Status("indexed %d records in %.3f seconds", p.Rtree.Size(), t2)

		pid := os.Getpid()
		strpid := strconv.Itoa(pid)

		fh.Write([]byte(strpid))

		p.Logger.Status("wrote PID file %s", *pidfile)

		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)

		go func() {
			<-sigs

			p.Logger.Status("remove PID file %s", *pidfile)

			os.Remove(*pidfile)
			os.Exit(0)
		}()

		ch <- true
	}()

	handler := func(rsp http.ResponseWriter, req *http.Request) {

		if atomic.LoadInt32(&indexing) == 1 {
			http.Error(rsp, "indexing records", http.StatusServiceUnavailable)
			return
		}

		query := req.URL.Query()

		str_lat := query.Get("latitude")
		str_lon := query.Get("longitude")
		placetype := query.Get("placetype")
		excluded := query["exclude"] // access the url.Values map directly to get every "exclude" parameter as a list

		if str_lat == "" {
			http.Error(rsp, "Missing latitude parameter", http.StatusBadRequest)
			return
		}

		if str_lon == "" {
			http.Error(rsp, "Missing longitude parameter", http.StatusBadRequest)
			return
		}

		lat, lat_err := strconv.ParseFloat(str_lat, 64)
		lon, lon_err := strconv.ParseFloat(str_lon, 64)

		if lat_err != nil {
			http.Error(rsp, "Invalid latitude parameter", http.StatusBadRequest)
			return
		}

		if lon_err != nil {
			http.Error(rsp, "Invalid longitude parameter", http.StatusBadRequest)
			return
		}

		if lat > 90.0 || lat < -90.0 {
			http.Error(rsp, "E_IMPOSSIBLE_LATITUDE", http.StatusBadRequest)
			return
		}

		if lon > 180.0 || lon < -180.0 {
			http.Error(rsp, "E_IMPOSSIBLE_LONGITUDE", http.StatusBadRequest)
			return
		}

		filters := pip.WOFPointInPolygonFilters{}

		if placetype != "" {

			if *strict && !p.IsKnownPlacetype(placetype) {
				http.Error(rsp, "Unknown placetype", http.StatusBadRequest)
				return
			}

			filters["placetype"] = placetype
		}

		for _, what := range excluded {

			if what == "deprecated" || what == "superseded" {
				filters[what] = false
			}
		}

		results, timings := p.GetByLatLonFiltered(lat, lon, filters)

		count := len(results)
		ttp := 0.0

		for _, t := range timings {
			ttp += t.Duration
		}

		if placetype != "" {
			p.Logger.Debug("time to reverse geocode %f, %f @%s: %d results in %f seconds ", lat, lon, placetype, count, ttp)
		} else {
			p.Logger.Debug("time to reverse geocode %f, %f: %d results in %f seconds ", lat, lon, count, ttp)
		}

		js, err := json.Marshal(results)

		if err != nil {
			http.Error(rsp, err.Error(), http.StatusInternalServerError)
			return
		}

		// maybe this although it seems like it adds functionality for a lot of
		// features this server does not need - https://github.com/rs/cors
		// (20151022/thisisaaronland)

		if *cors {
			rsp.Header().Set("Access-Control-Allow-Origin", "*")
		}

		rsp.Header().Set("Content-Type", "application/json")
		rsp.Write(js)
	}

	endpoint := fmt.Sprintf("%s:%d", *host, *port)

	mux := http.NewServeMux()
	mux.HandleFunc("/", handler)

	err = gracehttp.Serve(&http.Server{Addr: endpoint, Handler: mux})

	if err != nil {
		logger.Fatal("failed to start server, because %v", err)
	}

	os.Exit(0)
}
Example #10
func main() {

	var root = flag.String("root", "", "The directory to sync")
	var bucket = flag.String("bucket", "", "The S3 bucket to sync <root> to")
	var prefix = flag.String("prefix", "", "A prefix inside your S3 bucket where things go")
	var debug = flag.Bool("debug", false, "Don't actually try to sync anything and spew a lot of line noise")
	var dryrun = flag.Bool("dryrun", false, "Go through the motions but don't actually clone anything")
	var credentials = flag.String("credentials", "", "Your S3 credentials file")
	var procs = flag.Int("processes", (runtime.NumCPU() * 2), "The number of concurrent processes to sync data with")
	var loglevel = flag.String("loglevel", "info", "Log level for reporting")
	var slack = flag.Bool("slack", false, "Send status updates to Slack (via slackcat)")
	var slack_config = flag.String("slack-config", "", "The path to your slackcat config")

	flag.Parse()

	if *root == "" {
		golog.Fatal("missing root to sync")
	}

	_, err := os.Stat(*root)

	if os.IsNotExist(err) {
		golog.Fatal("root does not exist")
	}

	if *bucket == "" {
		golog.Fatal("missing bucket")
	}

	if *credentials != "" {
		os.Setenv("AWS_CREDENTIAL_FILE", *credentials)
	}

	if *debug || *dryrun {
		*loglevel = "debug"
	}

	logger := log.NewWOFLogger("[wof-sync-dirs] ")

	writer := io.MultiWriter(os.Stdout)
	logger.AddLogger(writer, *loglevel)

	s := s3.WOFSync(*bucket, *prefix, *procs, *debug, logger)

	if *dryrun {
		s.Dryrun = true
	}

	s.MonitorStatus()
	err = s.SyncDirectory(*root)

	if err != nil {
		logger.Error("failed to sync %s, because %v", *root, err)
	}

	if *slack {

		sl, err := slackcat.NewWriter(*slack_config)

		if err != nil {
			logger.Warning("failed to create slackcat writer, because %v", err)
		} else {

			logger.AddLogger(sl, "status")

			logger.Status(s.StatusReport())
			logger.Status("Time to process %v", s.TimeToProcess)
		}
	}
}