forked from DataSparkRI/ProfilesApiGo
/
profiles_api.go
1301 lines (1117 loc) · 35.5 KB
/
profiles_api.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
TODO:
* Document What you have learned
* Allow for the rest of the shapefile types
* Use file logger
* Add Way list indicator slugs
* Need to support Multiple Times
* Query error should return proper exit code
* STANDARDIZE Using query results. This is not DRY at ALL!
URL Examples:
indicator data
[host]/[slug]?time=<validtime>&geos=<id,id,id>&geom=t<optional_geojson-that will return the geometry>
127.0.0.1:8080/indicator/total-population?time=2000&geos=*
shpfiles
[host]/shp/?geoms=1,3,4,5<ids> Arbitrary Geometry Id TODO: accommodate different types ( POINT, LINE )
shpfiles geoquery
Ex: get geoms in a specific geom Ex: 419 where lev = 6
[host]shp/q/?geoms=419&lev=6&q=IN
get geographies by level
[host]/geos/level/:slug
Optional:
?filter=FUZZYMATCH via geokey
*/
package main
import (
"database/sql"
"encoding/csv"
"encoding/json"
"errors"
"fmt"
"github.com/ProvidencePlan/profiles_api/cache"
"github.com/codegangsta/martini"
_ "github.com/lib/pq"
"io/ioutil"
"log"
"net/http"
"os"
"regexp"
"sort"
"strconv"
"strings"
)
// CONFIG holds the service's runtime settings: the Postgres connection
// parameters used by getDB, plus the Redis cache location and entry expiry
// used by getFromCache/putInCache.
type CONFIG struct {
DB_HOST string
DB_PORT string
DB_NAME string
DB_USER string
DB_PASS string
REDIS_CONN string //host and port ex: 127.0.0.1:6379
CACHE_EXPIRE int // expiry passed to putInCache (unit defined by the cache package — TODO confirm seconds)
}
// TODO: this struct needs to be implemented everywhere we fetch FlatValue data.
// FlatValue is one row of indicator data: a single indicator/geography/time
// combination. Raw numeric columns (Number, Percent, Moe) are paired with
// pre-formatted string counterparts (F_number, F_percent, F_moe). The
// sql.Null* types allow NULL database values; see ToMap for how NULLs and
// Value_type drive the rendered output.
type FlatValue struct {
Display_title string
Geography_name string
Geography_geokey string
Time_key string
Value_type string // "i" = regular indicator; any other value is treated as a denominator by ToMap
Number sql.NullFloat64
Percent sql.NullFloat64
Moe sql.NullFloat64 // margin of error
F_number sql.NullString // formatted Number
F_percent sql.NullString // formatted Percent
F_moe sql.NullString // formatted Moe
}
// Returns a Map for each field as a string.
// Keys: "number", "moe", "percent", "pct_moe". A NULL column renders as "".
// Value_type "i" marks a regular indicator; any other value is treated as a
// denominator: its estimate is suppressed ("n/a") and its moe is rendered as
// a percentage under "pct_moe".
func (f *FlatValue) ToMap() map[string]string {
	out := make(map[string]string)

	switch {
	case !f.Number.Valid:
		out["number"] = ""
	case f.Value_type == "i":
		out["number"] = f.F_number.String
	default:
		out["number"] = "n/a"
	}

	switch {
	case !f.Percent.Valid:
		out["percent"] = ""
	case f.F_percent.String == "0.0%":
		// a formatted zero percent is shown as blank
		out["percent"] = ""
	default:
		out["percent"] = f.F_percent.String
	}

	if !f.Moe.Valid {
		out["moe"] = ""
		out["pct_moe"] = ""
		return out
	}
	if f.Value_type != "i" {
		// this is a denominator: the moe is expressed as a percentage and
		// the estimate is suppressed (overwrites any earlier "number" value).
		out["moe"] = "n/a"
		out["pct_moe"] = fmt.Sprintf("%v%%", f.Moe.Float64)
		out["number"] = "n/a"
		return out
	}
	// regular indicator moe
	out["moe"] = fmt.Sprintf("%v", f.Moe.Float64)
	out["pct_moe"] = "n/a"
	return out
}
// toSlice flattens a FlatValue into the CSV column order used by getDataCSV:
// number, moe, percent, pct_moe.
// (Kept as a free function: it takes the value by copy, and ToMap has a
// pointer receiver — the local parameter is addressable so f.ToMap() works.)
func toSlice(f FlatValue) []string {
	m := f.ToMap()
	return []string{m["number"], m["moe"], m["percent"], m["pct_moe"]}
}
// getFromCache fetches a previously cached payload from Redis.
// Returns a nil/empty slice when the connection fails, the lookup fails, or
// the key is absent — callers treat len(result) == 0 as a cache miss.
func getFromCache(connStr string, hash string) []byte {
	conn, err := cache.RedisConn(connStr)
	if err != nil {
		return nil
	}
	defer conn.Close()
	val, err := cache.GetKey(conn, hash) // a missing key yields a zero-length byte slice
	if err != nil {
		return nil
	}
	return val
}
// putInCache stores val in Redis under hash with the given expiry.
// Best effort: a failed Redis connection is silently ignored.
func putInCache(connStr string, hash string, val []byte, expire int) {
	conn, err := cache.RedisConn(connStr)
	if err != nil {
		return
	}
	defer conn.Close()
	cache.SetKey(conn, hash, val, expire)
}
// grab meta data for given ind in a new go routine.
// Sends exactly one value on c: a slice of {slug,title,time_key} maps for
// every distinct slug/title/time combination sharing this indicator's id.
// On any error an empty slice is sent, so the goroutine receiving from c
// never blocks forever.
func getMetaData(c chan []interface{}, db *sql.DB, ind_slug string) {
	var (
		indicator_id  int
		slug          string
		display_title string
		time_key      string
	)
	results := []interface{}{}
	id_query := "SELECT indicator_id from profiles_flatvalue WHERE indicator_slug =$1 LIMIT 1"
	err := db.QueryRow(id_query, ind_slug).Scan(&indicator_id)
	if err != nil {
		log.Println("Error running query in getMetaData", id_query, "params:"+ind_slug, err)
		// BUG FIX: previously fell through and queried with indicator_id == 0.
		c <- results
		return
	}
	query := "SELECT DISTINCT indicator_slug, display_title, time_key from profiles_flatvalue WHERE indicator_id=$1;"
	stmt, err := db.Prepare(query)
	if err != nil {
		// BUG FIX: was log.Println with an uninterpreted %s verb, and then
		// continued to call Query on a nil statement (panic).
		log.Printf("Error preparing query %s in getMetaData", query)
		c <- results
		return
	}
	defer stmt.Close()
	rows, err := stmt.Query(indicator_id)
	if err != nil {
		log.Printf("Error running query %s in getMetaData", query)
		c <- results
		return
	}
	defer rows.Close()
	for rows.Next() {
		if err := rows.Scan(&slug, &display_title, &time_key); err != nil {
			log.Printf("Error scanning row in getMetaData: %v", err)
			continue
		}
		mrow := make(map[string]interface{})
		mrow["slug"] = slug
		mrow["title"] = display_title
		mrow["time_key"] = time_key
		results = append(results, mrow)
	}
	c <- results
}
// Indicator Api Handler. Returns values only.
// ind is an indicator slug, time a time_key, raw_geos a comma separated
// list of geography ids or "*" for all. The JSON payload is cached in
// Redis under a hash of the inputs. Error bodies follow this file's
// convention: "405" for bad input, "500" for server-side failures.
func getData(ind string, time string, raw_geos string, conf CONFIG) []byte {
	hash := cache.MakeHash(ind + time + raw_geos)
	c := getFromCache(conf.REDIS_CONN, hash)
	if len(c) != 0 {
		log.Println("Serving getData from cache: ", ind+time+raw_geos)
		return c
	}
	var (
		indicator_slug  string
		display_title   string
		geography_id    int
		geography_name  string
		geometry_id     sql.NullInt64 // scanned but not exposed in the payload
		value_type      string
		time_key        string
		number          sql.NullFloat64
		percent         sql.NullFloat64
		moe             sql.NullFloat64
		numerator       sql.NullFloat64
		numerator_moe   sql.NullFloat64
		f_number        sql.NullString
		f_percent       sql.NullString
		f_moe           sql.NullString
		f_numerator     sql.NullString
		f_numerator_moe sql.NullString
	)
	/* SANITIZING INPUTS */
	cleaned_geos, err := sanitize(raw_geos, "[0-9,\\*]+")
	if err != nil {
		return []byte("405")
	}
	data := map[string]interface{}{} // this will be the object that wraps everything
	db, err := getDB(conf)
	if err != nil {
		log.Println("Error trying to call getDB")
		return []byte("500")
	}
	defer db.Close()
	var base_query string = "SELECT indicator_slug, display_title, geography_id, geography_name, geometry_id, value_type, time_key, number, percent, moe, numerator,numerator_moe, f_number, f_percent, f_moe, f_numerator, f_numerator_moe FROM profiles_flatvalue WHERE indicator_slug = $1 AND time_key= $2"
	var query string
	// we need to support getting * geos or specific ones via their ids
	if cleaned_geos == "*" {
		query = base_query
	} else {
		// BUG FIX: a leading space is required here — without it the SQL
		// read "...time_key= $2AND geography_id...", which is invalid.
		query = base_query + " AND geography_id IN (" + cleaned_geos + ")"
	}
	stmt, err := db.Prepare(query)
	if err != nil {
		log.Printf("Error preparing query %s in getData", query)
		return []byte("500")
	}
	defer stmt.Close()
	rows, err := stmt.Query(ind, time)
	if err != nil {
		// BUG FIX: previously fell through to rows.Next() on a nil rows
		// value after a query error.
		log.Printf("Error running query %s in getData", query)
		return []byte("500")
	}
	defer rows.Close()
	// helpers mapping sql Null types onto JSON values (nil for NULL)
	nullFloat := func(v sql.NullFloat64) interface{} {
		if v.Valid {
			return v.Float64
		}
		return nil
	}
	nullString := func(v sql.NullString) interface{} {
		if v.Valid {
			return v.String
		}
		return nil
	}
	results := []interface{}{}
	metachan := make(chan []interface{}) // get the meta data
	go getMetaData(metachan, db, ind)
	data["related"] = <-metachan
	for rows.Next() {
		jrow := make(map[string]interface{})
		err := rows.Scan(&indicator_slug, &display_title, &geography_id, &geography_name, &geometry_id, &value_type, &time_key, &number, &percent, &moe, &numerator, &numerator_moe, &f_number, &f_percent, &f_moe, &f_numerator, &f_numerator_moe)
		if err != nil {
			// BUG FIX: was log.Fatal, which killed the whole server over a
			// single bad row; skip the row instead.
			log.Println("Error scanning row in getData:", err)
			continue
		}
		jrow["indicator_slug"] = indicator_slug
		jrow["display_title"] = display_title
		jrow["geography_id"] = geography_id
		jrow["geography_name"] = geography_name
		jrow["value_type"] = value_type
		jrow["time_key"] = time_key
		jrow["number"] = nullFloat(number)
		if value_type != "i" {
			jrow["percent"] = nullFloat(percent)
		} else {
			jrow["percent"] = nil
		}
		jrow["moe"] = nullFloat(moe)
		jrow["f_number"] = nullString(f_number)
		if value_type != "i" {
			jrow["numerator"] = nullFloat(numerator)
			jrow["f_numerator"] = nullString(f_numerator)
			// BUG FIX: these two previously copied numerator / f_numerator
			// instead of the *_moe columns.
			jrow["numerator_moe"] = nullFloat(numerator_moe)
			jrow["f_numerator_moe"] = nullString(f_numerator_moe)
			jrow["f_percent"] = nullString(f_percent)
		} else {
			jrow["f_percent"] = nil
		}
		jrow["f_moe"] = nullString(f_moe)
		results = append(results, jrow)
	}
	data["objects"] = &results
	j, err := json.Marshal(data)
	if err != nil {
		// BUG FIX: the marshal error was previously ignored.
		log.Println("Error marshaling response in getData:", err)
		return []byte("500")
	}
	if len(results) != 0 {
		putInCache(conf.REDIS_CONN, hash, j, conf.CACHE_EXPIRE)
	}
	return j
}
// Write csv formatted data to res.
// inds is a comma separated string of indicator ids, raw_geos of geography
// ids. Both are validated first; any error replies with HTTP 500.
// With a single indicator any number of geographies is exported; with
// multiple indicators only the first geography is used.
func getDataCSV(res http.ResponseWriter, inds string, raw_geos string, config CONFIG) {
	/* SANITIZING INPUTS */
	cleaned_geos, err := sanitize(raw_geos, "[0-9,]+")
	if err != nil {
		http.Error(res, "", 500)
		return
	}
	cleaned_inds, err := sanitize(inds, "[0-9,]+")
	if err != nil {
		http.Error(res, "", 500)
		return
	}
	// Decide whether to export one indicator across N geographies, or
	// many indicators for a single geography.
	splitGeos := strings.Split(cleaned_geos, ",")
	if len(strings.Split(cleaned_inds, ",")) > 1 {
		cleaned_geos = splitGeos[0]
	}
	db, err := getDB(config)
	if err != nil {
		log.Println("Error trying to call getDB: GetDataCSV")
		http.Error(res, "", 500)
		return
	}
	defer db.Close()
	var time string
	var timeSet []string
	var flatValues = make(map[string]map[string]map[string]FlatValue) //{indid:{geoid:{time1, time2}}}
	var flatValKeys = []string{}
	// FETCH the Distinct Times in our Dataset.
	// Interpolation is safe: both inputs are sanitized to digits and commas.
	timesQ := "SELECT DISTINCT time_key FROM profiles_flatvalue WHERE indicator_id IN (%v) AND geography_id IN(%v) AND time_key != 'change'"
	timesQ = fmt.Sprintf(timesQ, cleaned_inds, cleaned_geos)
	tRows, err := db.Query(timesQ)
	if err != nil {
		// BUG FIX: previously only printed the error and then called
		// tRows.Next() on a nil rows value, which panics.
		log.Println("Error running times query in getDataCSV:", err)
		http.Error(res, "", 500)
		return
	}
	defer tRows.Close()
	for tRows.Next() {
		if err := tRows.Scan(&time); err != nil {
			// BUG FIX: was log.Fatal, which killed the whole server.
			log.Println("Error scanning time_key in getDataCSV:", err)
			http.Error(res, "", 500)
			return
		}
		timeSet = append(timeSet, time)
	}
	sort.Strings(timeSet)
	// Now Fetch the Data
	query := "WITH T1 AS (SELECT indicator_id, display_title, geography_name, geography_geo_key, time_key, value_type, number, percent, moe, f_number, f_percent, f_moe FROM profiles_flatvalue WHERE indicator_id IN (%v) AND geography_id IN(%v) AND time_key != 'change'), T2 AS (SELECT DISTINCT ON (indicators_id) * from profiles_groupindex WHERE indicators_id IN (%v)), T3 AS (SELECT T1.*, T2.* FROM T1 LEFT OUTER JOIN T2 ON T1.indicator_id=T2.indicators_id) SELECT display_title, geography_name, geography_geo_key, time_key, value_type, number, percent, moe, f_number, f_percent, f_moe FROM T3 ORDER BY \"order\""
	query = fmt.Sprintf(query, cleaned_inds, cleaned_geos, cleaned_inds)
	stmt, err := db.Prepare(query)
	if err != nil {
		log.Println("Error Preparing query: getDataCSV")
		http.Error(res, "", 500)
		return
	}
	defer stmt.Close()
	rows, err := stmt.Query()
	if err != nil {
		// BUG FIX: was log.Fatal.
		log.Println("Error running query in getDataCSV:", err)
		http.Error(res, "", 500)
		return
	}
	defer rows.Close()
	// TODO: This is a HACK. Create a geo names map as we go
	geoNamesToIds := map[string]string{}
	for rows.Next() {
		v := FlatValue{}
		err := rows.Scan(&v.Display_title, &v.Geography_name, &v.Geography_geokey, &v.Time_key, &v.Value_type, &v.Number, &v.Percent, &v.Moe, &v.F_number, &v.F_percent, &v.F_moe)
		if err != nil {
			continue // skip unreadable rows, as before
		}
		// collect the geokey and name
		geoNamesToIds[v.Geography_name] = v.Geography_geokey
		// add a key for each indicator the first time we see it
		if _, exists := flatValues[v.Display_title]; !exists {
			//-------------------------------------geoid: {timekey: FV}-----------------------------------//
			flatValues[v.Display_title] = make(map[string]map[string]FlatValue)
			flatValKeys = append(flatValKeys, v.Display_title)
		}
		// add the geography key with placeholders for every known time,
		// so each CSV row has a full set of columns
		if _, exists := flatValues[v.Display_title][v.Geography_name]; !exists {
			flatValues[v.Display_title][v.Geography_name] = make(map[string]FlatValue)
			for _, t := range timeSet {
				flatValues[v.Display_title][v.Geography_name][t] = FlatValue{Display_title: v.Display_title, Time_key: t}
			}
		}
		// actually store the data
		flatValues[v.Display_title][v.Geography_name][v.Time_key] = v
	}
	csvWriter := csv.NewWriter(res)
	// generate a header: est/moe/pct/pct_moe columns per time key
	header := []string{"indicator", "geography_name", "geography_id"}
	for _, tVal := range timeSet {
		header = append(header, tVal+"_est", tVal+"_moe", tVal+"_pct", tVal+"_pct_moe")
	}
	csvWriter.Write(header)
	csvWriter.Flush()
	// one CSV row per (indicator, geography), times in sorted order
	for _, indKey := range flatValKeys {
		for _, sgk := range sortGeoKeys(flatValues[indKey]) {
			csvRow := []string{indKey, sgk, geoNamesToIds[sgk]}
			indGeo := flatValues[indKey][sgk]
			for _, t := range timeSet {
				csvRow = append(csvRow, toSlice(indGeo[t])...)
			}
			csvWriter.Write(csvRow)
			csvWriter.Flush() // flush per row so large exports stream
		}
	}
	if err := csvWriter.Error(); err != nil {
		log.Println("Error writing csv in getDataCSV:", err)
	}
}
// getDataGeoJson joins indicator data with shapefile geoms and returns a
// GeoJSON-style payload. ind is an indicator slug, time a time_key,
// raw_geos a comma separated list of geography ids or "*". Results are
// cached in Redis. Error bodies: "405" bad input, "500" server failure.
func getDataGeoJson(ind string, time string, raw_geos string, conf CONFIG) []byte {
	hash := cache.MakeHash("gdgj:" + ind + time + raw_geos)
	c := getFromCache(conf.REDIS_CONN, hash)
	if len(c) != 0 {
		log.Println("Serving getData from cache gdgj: ", ind+time+raw_geos)
		return c
	}
	var (
		indicator_slug  string
		display_title   string
		geography_id    int
		geography_name  string
		geometry_id     sql.NullInt64 // scanned but not exposed in the payload
		value_type      string
		time_key        string
		number          sql.NullFloat64
		percent         sql.NullFloat64
		moe             sql.NullFloat64
		numerator       sql.NullFloat64
		numerator_moe   sql.NullFloat64
		f_number        sql.NullString
		f_percent       sql.NullString
		f_moe           sql.NullString
		f_numerator     sql.NullString
		f_numerator_moe sql.NullString
		geom            sql.NullString
	)
	/* SANITIZING INPUTS */
	cleaned_geos, err := sanitize(raw_geos, "[0-9,\\*]+")
	if err != nil {
		return []byte("405")
	}
	data := map[string]interface{}{} // this will be the object that wraps everything
	db, err := getDB(conf)
	if err != nil {
		log.Println("Error trying to call getDB")
		return []byte("500")
	}
	defer db.Close()
	var base_query = "SELECT profiles_flatvalue.indicator_slug, profiles_flatvalue.display_title, profiles_flatvalue.geography_id, profiles_flatvalue.geography_name, profiles_flatvalue.geometry_id, profiles_flatvalue.value_type, profiles_flatvalue.time_key, profiles_flatvalue.number, profiles_flatvalue.percent, profiles_flatvalue.moe, profiles_flatvalue.numerator, profiles_flatvalue.numerator_moe, profiles_flatvalue.f_number, profiles_flatvalue.f_percent, profiles_flatvalue.f_moe, profiles_flatvalue.f_numerator, profiles_flatvalue.f_numerator_moe, ST_AsGeoJSON(maps_polygonmapfeature.geom) AS geom FROM profiles_flatvalue LEFT OUTER JOIN maps_polygonmapfeature ON (profiles_flatvalue.geography_geo_key = maps_polygonmapfeature.geo_key) WHERE profiles_flatvalue.indicator_slug = $1 AND profiles_flatvalue.time_key= $2"
	var query string
	// we need to support getting * geos or specific ones via their ids;
	// we also need to be able to join on a geom
	if cleaned_geos == "*" {
		query = base_query
	} else {
		query = base_query + " AND profiles_flatvalue.geography_id IN (" + cleaned_geos + ")"
	}
	stmt, err := db.Prepare(query)
	if err != nil {
		log.Println("Error preparing query: getDataGeoJson")
		return []byte("500")
	}
	defer stmt.Close()
	rows, err := stmt.Query(ind, time)
	if err != nil {
		log.Println("Error running query: getDataGeoJson")
		return []byte("500")
	}
	defer rows.Close()
	// helpers mapping sql Null types onto JSON values (nil for NULL)
	nullFloat := func(v sql.NullFloat64) interface{} {
		if v.Valid {
			return v.Float64
		}
		return nil
	}
	nullString := func(v sql.NullString) interface{} {
		if v.Valid {
			return v.String
		}
		return nil
	}
	results := []interface{}{}
	metachan := make(chan []interface{}) // get the meta data
	go getMetaData(metachan, db, ind)
	data["related"] = <-metachan
	for rows.Next() {
		jrow := make(map[string]interface{})
		err := rows.Scan(&indicator_slug, &display_title, &geography_id, &geography_name, &geometry_id, &value_type, &time_key, &number, &percent, &moe, &numerator, &numerator_moe, &f_number, &f_percent, &f_moe, &f_numerator, &f_numerator_moe, &geom)
		if err != nil {
			log.Println("Error scanning row in getDataGeoJson:", err)
			continue
		}
		// a valid geom becomes the base GeoJSON object; otherwise we emit
		// an object with null coordinates
		if geom.Valid {
			jrow = jsonLoads(geom.String)
		} else {
			jrow["coordinates"] = nil
		}
		properties := make(map[string]interface{})
		properties["label"] = geography_name
		properties["geography_id"] = geography_id
		values := make(map[string]interface{})
		values["indicator_slug"] = indicator_slug
		values["value_type"] = value_type
		values["time_key"] = time_key
		values["number"] = nullFloat(number)
		if value_type != "i" {
			values["percent"] = nullFloat(percent)
		} else {
			values["percent"] = nil
		}
		values["moe"] = nullFloat(moe)
		values["f_number"] = nullString(f_number)
		if value_type != "i" {
			values["numerator"] = nullFloat(numerator)
			values["f_numerator"] = nullString(f_numerator)
			values["numerator_moe"] = nullFloat(numerator_moe)
			// BUG FIX: this was gated on f_numerator.Valid, so a NULL
			// f_numerator_moe with a non-NULL f_numerator emitted "".
			values["f_numerator_moe"] = nullString(f_numerator_moe)
			values["f_percent"] = nullString(f_percent)
		} else {
			values["f_percent"] = nil
		}
		values["f_moe"] = nullString(f_moe)
		jrow["properties"] = &properties
		jrow["values"] = &values
		results = append(results, jrow)
	}
	data["objects"] = &results
	j, err := json.Marshal(data)
	if err != nil {
		// BUG FIX: the marshal error was previously ignored.
		log.Println("Error marshaling response in getDataGeoJson:", err)
		return []byte("500")
	}
	if len(results) != 0 {
		putInCache(conf.REDIS_CONN, hash, j, conf.CACHE_EXPIRE)
	}
	return j
}
// getGeomsByGeosId returns GeoJSON geometries (with geo_key/label/slug
// properties) for a set of GeoRecords. Results are cached in Redis.
func getGeomsByGeosId(geos_ids string, conf CONFIG) []byte {
/*
geos_ids is a comma delimited string of IDS found in the profiles_georecord_table
*/
// NOTE(review): the "shp:" cache prefix is also used by getGeomsById, so
// identical id strings from the two endpoints share cache keys — confirm
// and use distinct prefixes.
hash := cache.MakeHash("shp:" + geos_ids)
c := getFromCache(conf.REDIS_CONN, hash)
if len(c) != 0 {
log.Println("Serving getData from shp cache:", geos_ids)
return c
}
var (
geos_id int
geos_name string
geos_slug string
geo_key string
geom string
)
// inputs are restricted to digits, commas and * before interpolation below
cleaned_geos, err := sanitize(geos_ids, "[0-9,\\*]+")
if err != nil {
r := []byte("405")
return r
}
db, err := getDB(conf)
if err != nil {
log.Println("Error trying to call getDB")
r := []byte("500")
return r
}
defer db.Close()
// NOTE(review): the first selected column is profiles_georecord.geo_id but
// it is scanned into the int geos_id; profiles_georecord.id looks intended
// (geo_id is also selected as the 4th column for geo_key) — verify against
// the schema. If geo_id is not numeric, Scan errors and rows are silently
// skipped below.
base_query := "SELECT profiles_georecord.geo_id, profiles_georecord.name, profiles_georecord.slug, profiles_georecord.geo_id, ST_ASGeoJSON(maps_polygonmapfeature.geom) as geom FROM profiles_georecord, maps_polygonmapfeature WHERE profiles_georecord.id IN(" + cleaned_geos + ") AND profiles_georecord.geo_id = maps_polygonmapfeature.geo_key"
rows, err := db.Query(base_query)
if err != nil {
log.Println("Error runnning query: getGeomsByGeosId")
r := []byte("500")
return r
}
defer rows.Close()
data := map[string]interface{}{} // this will be the object that wraps everything
results := []interface{}{}
for rows.Next() {
err := rows.Scan(&geos_id, &geos_name, &geos_slug, &geo_key, &geom)
// rows that fail to scan are skipped silently
if err == nil {
properties := make(map[string]interface{})
properties["geo_key"] = geo_key
properties["label"] = geos_name
properties["slug"] = geos_slug
// parse the GeoJSON string and attach our properties to it
geom := jsonLoads(geom)
geom["properties"] = &properties
results = append(results, geom)
}
}
data["objects"] = &results
// NOTE(review): the json.Marshal error is ignored here; j may be nil on
// failure.
j, err := json.Marshal(data)
if len(results) != 0 {
putInCache(conf.REDIS_CONN, hash, j, conf.CACHE_EXPIRE)
}
return j
}
// Get Geoms by a list of geometry ids.
// geoms_ids is a comma delimited string of maps_polygonmapfeature ids.
// Results (GeoJSON objects with geo_key/label properties) are cached.
func getGeomsById(geoms_ids string, conf CONFIG) []byte {
	// BUG FIX: this previously used the same "shp:" cache prefix as
	// getGeomsByGeosId, so the two endpoints could serve each other's cached
	// payloads whenever the id strings matched. Use a distinct prefix.
	hash := cache.MakeHash("shpgeom:" + geoms_ids)
	c := getFromCache(conf.REDIS_CONN, hash)
	if len(c) != 0 {
		log.Println("Serving getData from shp cache:", geoms_ids)
		return c
	}
	var (
		geom    string
		geo_key string
		label   string
	)
	cleaned_geos, err := sanitize(geoms_ids, "[0-9,\\*]+")
	if err != nil {
		return []byte("405")
	}
	db, err := getDB(conf)
	if err != nil {
		log.Println("Error trying to call getDB")
		return []byte("500")
	}
	defer db.Close()
	// cleaned_geos is restricted to digits/commas/* so interpolation is safe
	rows, err := db.Query("SELECT geo_key, label, ST_AsGeoJSON(geom) FROM maps_polygonmapfeature WHERE id IN (" + cleaned_geos + ")")
	if err != nil {
		log.Println("Error running query: getGeomsById")
		return []byte("500")
	}
	defer rows.Close()
	data := map[string]interface{}{} // this will be the object that wraps everything
	results := []interface{}{}
	for rows.Next() {
		if err := rows.Scan(&geo_key, &label, &geom); err != nil {
			continue // skip unreadable rows, as before
		}
		properties := make(map[string]interface{})
		properties["geo_key"] = geo_key
		properties["label"] = label
		// parse the GeoJSON string and attach our properties to it
		feature := jsonLoads(geom)
		feature["properties"] = properties
		results = append(results, feature)
	}
	data["objects"] = &results
	j, err := json.Marshal(data)
	if err != nil {
		// BUG FIX: the marshal error was previously ignored.
		log.Println("Error marshaling response in getGeomsById:", err)
		return []byte("500")
	}
	if len(results) != 0 {
		putInCache(conf.REDIS_CONN, hash, j, conf.CACHE_EXPIRE)
	}
	return j
}
// Return a list of polygons that are IN or OF geom_id depending on what the geo_lev_id is
/* Example:
IN query
Find geoms of geolevel 6 that are in geom 419. lev=N or *
http://127.0.0.1:8080/shp/q/?geoms=419&lev=6&q=IN
OF query
Find geoms of geolevel 1 that contain geoms 380
http://127.0.0.1:8080/shp/q/?geoms=380&lev=1&q=OF
*/
// Error bodies: "405" for bad input, "500" for server-side failures
// (previously DB failures also returned "405", inconsistent with the rest
// of this file).
func getGeoQuery(conf CONFIG, geoms_ids string, geo_lev_id string, query_type string) []byte {
	hash := cache.MakeHash("gGQ:" + geoms_ids + geo_lev_id + query_type)
	c := getFromCache(conf.REDIS_CONN, hash)
	if len(c) != 0 {
		log.Println("Serving getData from cache: ", "gGQ:"+geoms_ids+geo_lev_id+query_type)
		return c
	}
	cleaned_geoms, err := sanitize(geoms_ids, "[0-9,\\*]+")
	if err != nil {
		return []byte("405")
	}
	cleaned_geo_lev, err := sanitize(geo_lev_id, "[0-9,]+") // supports multiple levels
	if err != nil {
		return []byte("405")
	}
	cleaned_query_type, err := sanitize(query_type, "(IN|OF)")
	if err != nil {
		return []byte("405")
	}
	// also make sure the sanitized query_type is exactly IN or OF
	if cleaned_query_type != "IN" && cleaned_query_type != "OF" {
		return []byte("405")
	}
	var (
		geomId int
		geosId int
		geoKey string
		label  string
		slug   string
	)
	var geom_query string
	where_clause := " WHERE profiles_georecord.level_id IN (" + cleaned_geo_lev + ") AND "
	if cleaned_query_type == "IN" {
		// find geoms contained in this geom
		geom_query = "WITH targ_levs AS (SELECT maps_polygonmapfeature.id as geom_id, maps_polygonmapfeature.geo_key, ST_SimplifyPreserveTopology(maps_polygonmapfeature.geom, 0.001) AS geom, profiles_georecord.id as id, profiles_georecord.slug, profiles_georecord.name as label FROM maps_polygonmapfeature, profiles_georecord" + where_clause + "maps_polygonmapfeature.geo_key=profiles_georecord.geo_id), targ_geom AS (SELECT id, geo_key, ST_SimplifyPreserveTopology(geom, 0.001) AS geom FROM maps_polygonmapfeature WHERE id IN (" + cleaned_geoms + ") LIMIT 1) SELECT DISTINCT ON (targ_levs.geo_key) targ_levs.id, targ_levs.geom_id, targ_levs.geo_key, targ_levs.label, targ_levs.slug FROM targ_levs, targ_geom WHERE ST_Area(ST_Intersection(targ_levs.geom, targ_geom.geom)) > ST_Area(targ_levs.geom)/2 ORDER BY targ_levs.geo_key"
	} else {
		// find geoms that contain geom (same query, but the intersection is
		// compared against the target geom's area instead)
		geom_query = "WITH targ_levs AS (SELECT maps_polygonmapfeature.id as geom_id, maps_polygonmapfeature.geo_key, ST_SimplifyPreserveTopology(maps_polygonmapfeature.geom, 0.001) AS geom, profiles_georecord.id as id, profiles_georecord.slug, profiles_georecord.name as label FROM maps_polygonmapfeature, profiles_georecord" + where_clause + "maps_polygonmapfeature.geo_key=profiles_georecord.geo_id), targ_geom AS (SELECT id, geo_key, ST_SimplifyPreserveTopology(geom, 0.001) AS geom FROM maps_polygonmapfeature WHERE id IN (" + cleaned_geoms + ") LIMIT 1) SELECT DISTINCT ON (targ_levs.geo_key) targ_levs.id, targ_levs.geom_id, targ_levs.geo_key, targ_levs.label, targ_levs.slug FROM targ_levs, targ_geom WHERE ST_Area(ST_Intersection(targ_levs.geom, targ_geom.geom)) > ST_Area(targ_geom.geom)/2 ORDER BY targ_levs.geo_key"
	}
	//TODO: We tend to always run queries like this, why not abstract it
	db, err := getDB(conf)
	if err != nil {
		log.Println("Error trying to call getDB")
		return []byte("500")
	}
	defer db.Close()
	stmt, err := db.Prepare(geom_query)
	if err != nil {
		log.Println("Error preparing query: ", geom_query)
		// BUG FIX: server-side failure now reports 500 (was 405)
		return []byte("500")
	}
	defer stmt.Close()
	rows, err := stmt.Query()
	if err != nil {
		log.Println("Error running query ", geom_query)
		log.Println(err)
		return []byte("500")
	}
	defer rows.Close()
	data := map[string]interface{}{} // this will be the object that wraps everything
	results := []interface{}{}
	for rows.Next() {
		err := rows.Scan(&geosId, &geomId, &geoKey, &label, &slug)
		if err != nil {
			// BUG FIX: log.Println does not interpret %s verbs; pass err directly
			log.Println("Error in row:", err)
			continue
		}
		jrow := make(map[string]interface{})
		jrow["id"] = geosId
		jrow["geom_id"] = geomId
		jrow["geoKey"] = geoKey
		jrow["label"] = label
		jrow["slug"] = slug
		results = append(results, jrow)
	}
	data["objects"] = &results
	j, err := json.Marshal(data)
	if err != nil {
		log.Println("Error marshaling response in getGeoQuery:", err)
		return []byte("500")
	}
	if len(results) != 0 {
		putInCache(conf.REDIS_CONN, hash, j, conf.CACHE_EXPIRE)
	}
	return j
}
// Return GeoRecords by Level slug.
// filter_key optionally narrows results to geo_ids containing the filter
// (fuzzy ILIKE match); an invalid filter is ignored rather than rejected.
func getGeosByLevSlug(conf CONFIG, levslug string, filter_key string) []byte {
	hash := cache.MakeHash("gGByLS:" + levslug + filter_key)
	c := getFromCache(conf.REDIS_CONN, hash)
	if len(c) != 0 {
		log.Println("Serving getData from cache: ", "gGByLS:"+levslug)
		return c
	}
	var (
		id     int
		geoKey string
		slug   string
		name   string
	)
	cleaned_slug, err := sanitize(levslug, "[-\\w]+")
	if err != nil {
		return []byte("405")
	}
	// a bad filter is not fatal; just drop it
	cleaned_filter, err := sanitize(filter_key, "[-\\w\\d]+")
	if err != nil {
		cleaned_filter = ""
	}
	db, err := getDB(conf)
	if err != nil {
		log.Println("Error trying to call getDB")
		return []byte("500")
	}
	defer db.Close()
	query := "SELECT profiles_georecord.id, profiles_georecord.geo_id, profiles_georecord.slug, profiles_georecord.name FROM profiles_geolevel FULL JOIN profiles_georecord ON profiles_georecord.level_id = profiles_geolevel.id WHERE profiles_geolevel.slug=$1"
	if cleaned_filter != "" {
		// cleaned_filter is restricted to [-\w\d]+ by sanitize, so the
		// interpolation cannot break out of the quoted pattern
		query += " AND profiles_georecord.geo_id ILIKE '%" + cleaned_filter + "%'"
	}
	rows, err := db.Query(query, cleaned_slug)
	if err != nil {
		// BUG FIX: was log.Println with an uninterpreted %s verb
		log.Printf("Error running query: %s", query)
		return []byte("500")
	}
	defer rows.Close()
	data := map[string]interface{}{} // this will be the object that wraps everything
	results := []interface{}{}
	for rows.Next() {
		if err := rows.Scan(&id, &geoKey, &slug, &name); err != nil {
			continue // skip unreadable rows, as before
		}
		jrow := make(map[string]interface{})
		jrow["id"] = id
		jrow["geoKey"] = geoKey
		jrow["slug"] = slug
		jrow["name"] = name
		results = append(results, jrow)
	}
	data["objects"] = &results
	j, err := json.Marshal(data)
	if err != nil {
		// BUG FIX: the marshal error was previously ignored
		log.Println("Error marshaling response in getGeosByLevSlug:", err)
		return []byte("500")
	}
	if len(results) != 0 {
		putInCache(conf.REDIS_CONN, hash, j, conf.CACHE_EXPIRE)
	}
	return j
}
func getPointOverlays(conf CONFIG) []byte {