func New(conf database.Config, m *mapping.Mapping) (database.DB, error) {
	db := &PostGIS{}
	db.Tables = make(map[string]*TableSpec)
	db.GeneralizedTables = make(map[string]*GeneralizedTableSpec)
	db.Config = conf

	// accept postgis:// URLs by rewriting the scheme to postgres://
	// before handing the URL to pq
	if strings.HasPrefix(db.Config.ConnectionParams, "postgis://") {
		db.Config.ConnectionParams = strings.Replace(
			db.Config.ConnectionParams,
			"postgis", "postgres", 1,
		)
	}

	params, err := pq.ParseURL(db.Config.ConnectionParams)
	if err != nil {
		return nil, err
	}
	params = disableDefaultSslOnLocalhost(params)
	db.Prefix = prefixFromConnectionParams(params)

	// build table specs from the tag mapping
	for name, table := range m.Tables {
		db.Tables[name] = NewTableSpec(db, table)
	}
	for name, table := range m.GeneralizedTables {
		db.GeneralizedTables[name] = NewGeneralizedTableSpec(db, table)
	}
	db.prepareGeneralizedTableSources()
	db.prepareGeneralizations()

	db.pointTagMatcher = m.PointMatcher()
	db.lineStringTagMatcher = m.LineStringMatcher()
	db.polygonTagMatcher = m.PolygonMatcher()

	db.Params = params
	err = db.Open()
	if err != nil {
		return nil, err
	}
	return db, nil
}
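// Example usage (a hedged sketch, not part of this package): tagMapping is
// assumed to be a *mapping.Mapping loaded elsewhere, and the connection URL
// and its credentials are placeholders. Both postgis:// and postgres://
// URLs work, see the scheme rewrite in New above.
//
//	conf := database.Config{
//		ConnectionParams: "postgis://user:password@localhost/osm",
//	}
//	db, err := New(conf, tagMapping)
//	if err != nil {
//		log.Fatal(err)
//	}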
func ReadPbf(cache *osmcache.OSMCache, progress *stats.Statistics,
	tagmapping *mapping.Mapping, pbfFile *pbf.Pbf,
	limiter *limit.Limiter,
) {
	nodes := make(chan []element.Node, 4)
	coords := make(chan []element.Node, 4)
	ways := make(chan []element.Way, 4)
	relations := make(chan []element.Relation, 4)

	withLimiter := limiter != nil

	if pbfFile.Header.Time.Unix() != 0 {
		log.Printf("reading %s with data till %v",
			pbfFile.Filename, pbfFile.Header.Time.Local())
	}

	parser := pbf.NewParser(pbfFile, coords, nodes, ways, relations)

	// Barrier between the coords/nodes phase and the ways phase:
	// before the parser starts with ways, it sends one nil marker per
	// writer goroutine and waits until all of them reached the sync point.
	coordsSynced := make(chan bool)
	coordsSync := util.NewSyncPoint(int(nCoords+nNodes), func() {
		coordsSynced <- true
	})
	parser.NotifyWays(func() {
		for i := 0; int64(i) < nCoords; i++ {
			coords <- nil
		}
		for i := 0; int64(i) < nNodes; i++ {
			nodes <- nil
		}
		<-coordsSynced
	})

	// same barrier between the ways phase and the relations phase
	waysSynced := make(chan bool)
	waysSync := util.NewSyncPoint(int(nWays), func() {
		waysSynced <- true
	})
	parser.NotifyRelations(func() {
		for i := 0; int64(i) < nWays; i++ {
			ways <- nil
		}
		<-waysSynced
	})

	parser.Start()

	waitWriter := sync.WaitGroup{}

	// way writers
	for i := 0; int64(i) < nWays; i++ {
		waitWriter.Add(1)
		go func() {
			var skip, hit int // limiter cache statistics
			m := tagmapping.WayTagFilter()
			for ws := range ways {
				if ws == nil {
					// nil marks the end of the ways phase
					waysSync.Sync()
					continue
				}
				if skipWays {
					continue
				}
				for i := range ws {
					m.Filter(&ws[i].Tags)
					if withLimiter {
						// mark ways whose first ref is outside the limit
						if !cache.Coords.FirstRefIsCached(ws[i].Refs) {
							ws[i].Id = osmcache.SKIP
							skip++
						} else {
							hit++
						}
					}
				}
				cache.Ways.PutWays(ws)
				progress.AddWays(len(ws))
			}
			waitWriter.Done()
		}()
	}

	// relation writers
	for i := 0; int64(i) < nRels; i++ {
		waitWriter.Add(1)
		go func() {
			var skip, hit int // limiter cache statistics
			m := tagmapping.RelationTagFilter()
			for rels := range relations {
				numWithTags := 0
				for i := range rels {
					m.Filter(&rels[i].Tags)
					if len(rels[i].Tags) > 0 {
						numWithTags++
					}
					if withLimiter {
						// mark relations whose first member is outside the limit
						if !cache.Ways.FirstMemberIsCached(rels[i].Members) {
							rels[i].Id = osmcache.SKIP
							skip++
						} else {
							hit++
						}
					}
				}
				cache.Relations.PutRelations(rels)
				progress.AddRelations(numWithTags)
			}
			waitWriter.Done()
		}()
	}

	// coord writers
	for i := 0; int64(i) < nCoords; i++ {
		waitWriter.Add(1)
		go func() {
			var skip, hit int // limiter cache statistics
			g := geos.NewGeos()
			defer g.Finish()
			for nds := range coords {
				if nds == nil {
					// nil marks the end of the coords/nodes phase
					coordsSync.Sync()
					continue
				}
				if withLimiter {
					for i := range nds {
						// project a copy so the cached node keeps its
						// original lat/long
						nd := element.Node{Long: nds[i].Long, Lat: nds[i].Lat}
						proj.NodeToMerc(&nd)
						if !limiter.IntersectsBuffer(g, nd.Long, nd.Lat) {
							nds[i].Id = osmcache.SKIP
							skip++
						} else {
							hit++
						}
					}
				}
				cache.Coords.PutCoords(nds)
				progress.AddCoords(len(nds))
			}
			waitWriter.Done()
		}()
	}

	// node writers
	for i := 0; int64(i) < nNodes; i++ {
		waitWriter.Add(1)
		go func() {
			g := geos.NewGeos()
			defer g.Finish()
			m := tagmapping.NodeTagFilter()
			for nds := range nodes {
				if nds == nil {
					// nil marks the end of the coords/nodes phase
					coordsSync.Sync()
					continue
				}
				numWithTags := 0
				for i := range nds {
					m.Filter(&nds[i].Tags)
					if len(nds[i].Tags) > 0 {
						numWithTags++
					}
					if withLimiter {
						nd := element.Node{Long: nds[i].Long, Lat: nds[i].Lat}
						proj.NodeToMerc(&nd)
						if !limiter.IntersectsBuffer(g, nd.Long, nd.Lat) {
							nds[i].Id = osmcache.SKIP
						}
					}
				}
				cache.Nodes.PutNodes(nds)
				progress.AddNodes(numWithTags)
			}
			waitWriter.Done()
		}()
	}

	// wait for the parser, close the channels, let the writers drain
	parser.Close()
	close(relations)
	close(ways)
	close(nodes)
	close(coords)
	waitWriter.Wait()
}
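// The implementation of util.NewSyncPoint is not shown here. Judging from
// its call sites above, it is a single-use barrier: n participants call
// Sync(), the callback fires exactly once when the last one arrives, and
// all participants are then released. A minimal sketch under those assumed
// semantics (illustrative only, not the actual util implementation):
//
//	type SyncPoint struct {
//		n       int
//		reached int
//		done    func()
//		mu      sync.Mutex
//		wait    chan struct{}
//	}
//
//	func NewSyncPoint(n int, done func()) *SyncPoint {
//		return &SyncPoint{n: n, done: done, wait: make(chan struct{})}
//	}
//
//	func (s *SyncPoint) Sync() {
//		s.mu.Lock()
//		s.reached++
//		if s.reached == s.n {
//			s.done()      // run the callback once
//			close(s.wait) // release all waiting participants
//			s.mu.Unlock()
//			return
//		}
//		s.mu.Unlock()
//		<-s.wait // block until the last participant arrives
//	}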
func ReadPbf(cache *cache.OSMCache, progress *stats.Statistics,
	tagmapping *mapping.Mapping, pbfFile *pbf.Pbf,
) {
	nodes := make(chan []element.Node, 4)
	coords := make(chan []element.Node, 4)
	ways := make(chan []element.Way, 4)
	relations := make(chan []element.Relation, 4)

	if pbfFile.Header.Time.Unix() != 0 {
		log.Printf("reading %s with data till %v",
			pbfFile.Filename, pbfFile.Header.Time.Local())
	}

	// fan out: multiple parser goroutines read from a shared
	// channel of PBF block positions
	blocks := pbfFile.BlockPositions()
	waitParser := sync.WaitGroup{}
	for i := 0; int64(i) < nParser; i++ {
		waitParser.Add(1)
		go func() {
			for block := range blocks {
				block.Parse(coords, nodes, ways, relations)
			}
			waitParser.Done()
		}()
	}

	waitWriter := sync.WaitGroup{}

	// way writers
	for i := 0; int64(i) < nWays; i++ {
		waitWriter.Add(1)
		go func() {
			m := tagmapping.WayTagFilter()
			for ws := range ways {
				if skipWays {
					continue
				}
				for i := range ws {
					m.Filter(&ws[i].Tags)
				}
				cache.Ways.PutWays(ws)
				progress.AddWays(len(ws))
			}
			waitWriter.Done()
		}()
	}

	// relation writers
	for i := 0; int64(i) < nRels; i++ {
		waitWriter.Add(1)
		go func() {
			m := tagmapping.RelationTagFilter()
			for rels := range relations {
				numWithTags := 0
				for i := range rels {
					m.Filter(&rels[i].Tags)
					if len(rels[i].Tags) > 0 {
						numWithTags++
					}
				}
				cache.Relations.PutRelations(rels)
				progress.AddRelations(numWithTags)
			}
			waitWriter.Done()
		}()
	}

	// coord writers
	for i := 0; int64(i) < nCoords; i++ {
		waitWriter.Add(1)
		go func() {
			for nds := range coords {
				if skipCoords {
					continue
				}
				cache.Coords.PutCoords(nds)
				progress.AddCoords(len(nds))
			}
			waitWriter.Done()
		}()
	}

	// node writers
	for i := 0; int64(i) < nNodes; i++ {
		waitWriter.Add(1)
		go func() {
			m := tagmapping.NodeTagFilter()
			for nds := range nodes {
				numWithTags := 0
				for i := range nds {
					m.Filter(&nds[i].Tags)
					if len(nds[i].Tags) > 0 {
						numWithTags++
					}
				}
				cache.Nodes.PutNodes(nds)
				progress.AddNodes(numWithTags)
			}
			waitWriter.Done()
		}()
	}

	// all parsers done -> close channels -> writers drain and exit
	waitParser.Wait()
	close(coords)
	close(nodes)
	close(ways)
	close(relations)
	waitWriter.Wait()
}
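// The shutdown sequence above is the standard fan-out/fan-in pattern:
// wait for all producers, close the shared channels, then wait for the
// consumers to drain them. A self-contained sketch of the same pattern
// (all names are illustrative, nothing here is from this package):
//
//	func fanOutFanIn(blocks <-chan int) {
//		out := make(chan int, 4)
//		var producers, consumers sync.WaitGroup
//		for i := 0; i < 4; i++ {
//			producers.Add(1)
//			go func() {
//				defer producers.Done()
//				for b := range blocks {
//					out <- b * b // "parse" a block
//				}
//			}()
//		}
//		for i := 0; i < 2; i++ {
//			consumers.Add(1)
//			go func() {
//				defer consumers.Done()
//				for v := range out {
//					_ = v // "write" the result
//				}
//			}()
//		}
//		producers.Wait() // all blocks parsed
//		close(out)       // signal the consumers that no more data comes
//		consumers.Wait() // all results written
//	}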