Example #1
func (p WOFPointInPolygon) GetByLatLonForPlacetype(lat float64, lon float64, placetype string) ([]*geojson.WOFSpatial, []*WOFPointInPolygonTiming) {

	var c metrics.Counter
	c = *p.Metrics.CountLookups
	go c.Inc(1)

	t := time.Now()

	timings := make([]*WOFPointInPolygonTiming, 0)

	intersects, duration := p.GetIntersectsByLatLon(lat, lon)
	timings = append(timings, NewWOFPointInPolygonTiming("intersects", duration))

	inflated, duration := p.InflateSpatialResults(intersects)
	timings = append(timings, NewWOFPointInPolygonTiming("inflate", duration))

	// See what's going on here? We are filtering by placetype before
	// doing a final point-in-poly lookup, so we don't try to load country
	// records when we're only searching for localities

	filtered := make([]*geojson.WOFSpatial, 0)

	if placetype != "" {
		filtered, duration = p.FilterByPlacetype(inflated, placetype)
		timings = append(timings, NewWOFPointInPolygonTiming("filter", duration))
	} else {
		filtered = inflated
	}

	contained, duration := p.EnsureContained(lat, lon, filtered)
	timings = append(timings, NewWOFPointInPolygonTiming("contain", duration))

	d := time.Since(t)

	var tm metrics.Timer
	tm = *p.Metrics.TimeToProcess
	go tm.Update(d)

	ttp := float64(d) / 1e9

	if ttp > 0.5 {
		p.Logger.Warning("time to process %f,%f (%s) exceeds 0.5 seconds: %f", lat, lon, placetype, ttp)

		for _, t := range timings {
			p.Logger.Info("[%s] %f", t.Event, t.Duration)
		}
	}

	return contained, timings
}
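The per-stage timings above are collected with a small helper that isn't shown on this page. A minimal sketch of what it might look like (an assumption, not the package's actual code), storing Duration as seconds in a float64, which is what the %f verb used when logging t.Duration suggests:

type WOFPointInPolygonTiming struct {
	Event    string
	Duration float64 // seconds, assumed from the "%f" log format above
}

// NewWOFPointInPolygonTiming records the name of a processing stage and how
// long it took. Assumes the "time" package is imported.
func NewWOFPointInPolygonTiming(event string, d time.Duration) *WOFPointInPolygonTiming {
	return &WOFPointInPolygonTiming{
		Event:    event,
		Duration: d.Seconds(),
	}
}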
Example #2
func (p WOFPointInPolygon) GetIntersectsByRect(rect *rtreego.Rect) ([]rtreego.Spatial, time.Duration) {

	t := time.Now()

	results := p.Rtree.SearchIntersect(rect)

	d := time.Since(t)

	var tm metrics.Timer
	tm = *p.Metrics.TimeToIntersect
	go tm.Update(d)

	return results, d
}
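Every example on this page follows the same go-metrics idiom: take a wall-clock reading, do the work, and hand the elapsed time to a Timer. Here is that idiom in isolation, as a hedged sketch using the rcrowley/go-metrics API (GetOrRegisterTimer, DefaultRegistry and UpdateSince are part of that library); the metric name and the timedSearch wrapper are invented for illustration:

import (
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func timedSearch(work func()) {

	// the "pip.intersect" name is hypothetical
	tm := metrics.GetOrRegisterTimer("pip.intersect", metrics.DefaultRegistry)

	t := time.Now()

	work()

	// equivalent to tm.Update(time.Since(t)), which is what the examples on
	// this page do (asynchronously, via "go tm.Update(d)")
	tm.UpdateSince(t)
}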
Example #3
func dumpTimer(name string, metric gometrics.Timer) {
	m := metric.Snapshot()
	logger.WithFields(logrus.Fields{
		"name":                name,
		"count":               m.Count(),
		"sum":                 m.Sum(),
		"min":                 m.Min(),
		"max":                 m.Max(),
		"mean":                m.Mean(),
		"stddev":              m.StdDev(),
		"variance":            m.Variance(),
		"rate-one-minute":     m.Rate1(),
		"rate-five-minute":    m.Rate5(),
		"rate-fifteen-minute": m.Rate15(),
		"rate-mean":           m.RateMean(),
	}).Info()
}
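A hedged usage sketch for dumpTimer: it assumes logger is a package-level *logrus.Logger (as the WithFields call above implies) and that gometrics is the rcrowley/go-metrics package; the "db.query" metric name and the time.Sleep stand-in for real work are invented:

var logger = logrus.New()

func exampleDump() {

	// nil falls back to gometrics.DefaultRegistry
	qt := gometrics.GetOrRegisterTimer("db.query", nil)

	// Timer.Time runs the closure and records how long it took
	qt.Time(func() {
		time.Sleep(25 * time.Millisecond)
	})

	dumpTimer("db.query", qt)
}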
Example #4
func (p WOFPointInPolygon) InflateSpatialResults(results []rtreego.Spatial) ([]*geojson.WOFSpatial, time.Duration) {

	t := time.Now()

	inflated := make([]*geojson.WOFSpatial, 0)

	for _, r := range results {

		// https://golang.org/doc/effective_go.html#interface_conversions

		wof := r.(*geojson.WOFSpatial)
		inflated = append(inflated, wof)
	}

	d := time.Since(t)

	var tm metrics.Timer
	tm = *p.Metrics.TimeToInflate
	go tm.Update(d)

	return inflated, d
}
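The plain type assertion above will panic if the R-tree ever holds something that is not a *geojson.WOFSpatial. A defensive variant of the same loop (a sketch, not what the package does) uses the comma-ok form and skips anything unexpected:

	for _, r := range results {

		// the comma-ok form returns false instead of panicking when the
		// underlying type is not *geojson.WOFSpatial
		wof, ok := r.(*geojson.WOFSpatial)

		if !ok {
			p.Logger.Warning("unexpected spatial result %T, skipping", r)
			continue
		}

		inflated = append(inflated, wof)
	}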
Example #5
func (p WOFPointInPolygon) LoadGeoJSON(path string) (*geojson.WOFFeature, error) {

	t := time.Now()

	feature, err := geojson.UnmarshalFile(path)

	d := time.Since(t)

	var tm metrics.Timer
	tm = *p.Metrics.TimeToUnmarshal

	go tm.Update(d)

	if err != nil {
		p.Logger.Error("failed to unmarshal %s, because %s", path, err)
		return nil, err
	}

	var c metrics.Counter
	c = *p.Metrics.CountUnmarshal
	go c.Inc(1)

	return feature, err
}
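The same measurement could also be written with Timer.Time, which runs a closure and records its duration in one step (Time is part of the go-metrics Timer interface). A sketch of the unmarshal timed that way; the LoadGeoJSONTimed name is invented here, and the error logging and counter update are trimmed for brevity:

func (p WOFPointInPolygon) LoadGeoJSONTimed(path string) (*geojson.WOFFeature, error) {

	var feature *geojson.WOFFeature
	var err error

	// Timer.Time replaces the explicit time.Now()/time.Since()/Update sequence above
	tm := *p.Metrics.TimeToUnmarshal

	tm.Time(func() {
		feature, err = geojson.UnmarshalFile(path)
	})

	return feature, err
}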
Example #6
func (p WOFPointInPolygon) EnsureContained(lat float64, lon float64, results []*geojson.WOFSpatial) ([]*geojson.WOFSpatial, time.Duration) {

	// Okay - this isn't super complicated but it might look a bit scary
	// We're using a WaitGroup to process each possible result *and* we
	// are using (n) sub WaitGroups to process every polygon for each result
	// separately (20151020/thisisaaronland)

	// See also: https://talks.golang.org/2012/concurrency.slide#46
	// That's not exactly what we're doing here, but it's essentially what
	// the WaitGroups implement, just with a different syntax/pattern

	wg := new(sync.WaitGroup)
	wg.Add(len(results))

	contained := make([]*geojson.WOFSpatial, 0)
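	// Note: in this version 'contained' is appended to from multiple goroutines
	// below without any locking; Example #8 further down adds a sync.Mutex around
	// the append (see the commit and issues linked in its comments)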
	// timings := make([]*WOFPointInPolygonTiming, 0)

	t := time.Now()

	for _, wof := range results {

		wg_ensure := func(wof *geojson.WOFSpatial) {

			defer wg.Done()

			polygons, err := p.LoadPolygons(wof)

			if err != nil {
				p.Logger.Error("failed to load polygons for %d, because %v", wof.Id, err)
				return
			}

			is_contained := false

			wg2 := new(sync.WaitGroup)
			wg2.Add(len(polygons))

			for _, poly := range polygons {

				wg_contains := func(p *geojson.WOFPolygon, lt float64, ln float64) {

					defer wg2.Done()

					if p.Contains(lt, ln) {
						is_contained = true
					}
				}

				go wg_contains(poly, lat, lon)
			}

			wg2.Wait()

			// All done checking the polygons - are we contained?

			// d2 := time.Since(t2)
			// contain_event := fmt.Sprintf("contain %d (%d/%d iterations, %d points)", id, iters, count, points)
			// timings = append(timings, NewWOFPointInPolygonTiming(contain_event, d2))

			if is_contained {
				contained = append(contained, wof)
			}
		}

		go wg_ensure(wof)
	}

	wg.Wait()

	// All done checking the results

	d := time.Since(t)

	var tm metrics.Timer
	tm = *p.Metrics.TimeToContain
	go tm.Update(d)

	// There is a weird thing happening here that I don't entirely understand.
	// It *looks* like some part of the for loops or the waitgroup scaffolding
	// is adding 0.1 to 0.3 seconds to the total processing time, which sounds
	// insane, I know. All I can say is that the sum of all the timings above
	// for each result always seems to be less than 'ttc' below. So confused...
	// (20151020/thisisaaronland)

	/*
		ttc := float64(d) / 1e9

		if ttc > 0.4 {
			p.Logger.Warning("time to contains exceeds threshold of 0.4 seconds: %f", ttc)
		}
	*/

	return contained, d
}
Example #7
func endpointStat(t metrics.Timer) *pstats.EndpointStat {
	return &pstats.EndpointStat{
		Rate1:   proto.Float32(rate(t.Rate1())),
		Rate5:   proto.Float32(rate(t.Rate5())),
		Rate15:  proto.Float32(rate(t.Rate15())),
		Mean:    proto.Float32(milli(t.Mean())),
		StdDev:  proto.Float32(milli(t.StdDev())),
		Upper95: proto.Float32(milli(t.Percentile(0.95))),
	}
}
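endpointStat leans on two helpers, rate and milli, that aren't shown here. The definitions below are guesses rather than the actual code: go-metrics timers report durations in nanoseconds and rates in events per second, so milli presumably converts nanoseconds to milliseconds and rate may simply narrow to float32 (or apply some scaling):

// assumed implementations of the helpers used above; the real ones may differ
func milli(ns float64) float32 {
	return float32(ns / float64(time.Millisecond))
}

func rate(perSecond float64) float32 {
	return float32(perSecond)
}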
Example #8
func (p WOFPointInPolygon) EnsureContained(lat float64, lon float64, results []*geojson.WOFSpatial) ([]*geojson.WOFSpatial, time.Duration) {

	// Okay - this isn't super complicated but it might look a bit scary
	// We're using a WaitGroup to process each possible result *and* we
	// are using (n) sub WaitGroups to process every polygon for each result
	// separately (20151020/thisisaaronland)

	// See also: https://talks.golang.org/2012/concurrency.slide#46
	// That's not exactly what we're doing here, but it's essentially what
	// the WaitGroups implement, just with a different syntax/pattern

	wg := new(sync.WaitGroup)

	/*
		Matt Amos [11:57]
		wow. i just found something unexpected: when i move `wg.Add(1)` out of the loop and change it to `wg.Add(1000000)`
		the runtime goes up from 600ms to 3s!

		Aaron Cope [11:58]
		that’s… a thing

		[11:58]
		it might also explain some lag that I’ve never been able to account for

		[11:58]
		I guess maybe Go is starting 1M “things” in the background, maybe?
	*/

	mu := new(sync.Mutex)

	contained := make([]*geojson.WOFSpatial, 0)
	// timings := make([]*WOFPointInPolygonTiming, 0)

	t := time.Now()

	for _, wof := range results {

		wg.Add(1)

		wg_ensure := func(wof *geojson.WOFSpatial) {

			defer wg.Done()

			polygons, err := p.LoadPolygons(wof)

			if err != nil {
				p.Logger.Error("failed to load polygons for %d, because %v", wof.Id, err)
				return
			}

			is_contained := false

			wg2 := new(sync.WaitGroup)

			for _, poly := range polygons {

				wg2.Add(1)

				wg_contains := func(p *geojson.WOFPolygon, lt float64, ln float64) {

					defer wg2.Done()

					if p.Contains(lt, ln) {
						is_contained = true
					}
				}

				go wg_contains(poly, lat, lon)
			}

			wg2.Wait()

			// All done checking the polygons - are we contained?

			// d2 := time.Since(t2)
			// contain_event := fmt.Sprintf("contain %d (%d/%d iterations, %d points)", id, iters, count, points)
			// timings = append(timings, NewWOFPointInPolygonTiming(contain_event, d2))

			/*

				See this? This is important. Specifically the part where we are locking
				updates to the 'contained' array. Which makes total sense obviously except
				for the part where I completely spaced on this (despite all the chatter and
				error checking around the waitgroups above...) and hilarity inevitably
				ensued. So you know, don't do what I did... (20160112/thisisaaronland)

				https://github.com/whosonfirst/go-whosonfirst-pip/commit/986e527dbe9e62915757489db7c70d5140c53629
				https://github.com/whosonfirst/go-whosonfirst-pip/issues/15
				https://github.com/whosonfirst/go-whosonfirst-pip/issues/18
			*/

			if is_contained {
				mu.Lock()
				contained = append(contained, wof)
				mu.Unlock()
			}
		}

		go wg_ensure(wof)
	}

	wg.Wait()

	// All done checking the results

	d := time.Since(t)

	var tm metrics.Timer
	tm = *p.Metrics.TimeToContain
	go tm.Update(d)

	return contained, d
}
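The mutex above is the fix described in the linked commit and issues. Another way to avoid sharing the slice at all (a sketch of an alternative, not what go-whosonfirst-pip does) is to have each goroutine report its result on a channel and let the calling goroutine do every append; for simplicity this version checks each result's polygons sequentially instead of fanning out a second WaitGroup:

	// each worker sends exactly one value (the matching WOFSpatial or nil),
	// so the receive loop below runs len(results) times and only the calling
	// goroutine ever touches the 'contained' slice
	results_ch := make(chan *geojson.WOFSpatial, len(results))

	for _, wof := range results {

		go func(wof *geojson.WOFSpatial) {

			polygons, err := p.LoadPolygons(wof)

			if err != nil {
				p.Logger.Error("failed to load polygons for %d, because %v", wof.Id, err)
				results_ch <- nil
				return
			}

			for _, poly := range polygons {

				if poly.Contains(lat, lon) {
					results_ch <- wof
					return
				}
			}

			results_ch <- nil
		}(wof)
	}

	contained := make([]*geojson.WOFSpatial, 0)

	for i := 0; i < len(results); i++ {

		if wof := <-results_ch; wof != nil {
			contained = append(contained, wof)
		}
	}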