func Update(oscFile string, geometryLimiter *limit.Limiter, expireor expire.Expireor, osmCache *cache.OSMCache, diffCache *cache.DiffCache, force bool) error {
	state, err := diffstate.ParseFromOsc(oscFile)
	if err != nil {
		return err
	}
	lastState, err := diffstate.ParseLastState(config.BaseOptions.DiffDir)
	if err != nil {
		log.Warn(err)
	}

	if lastState != nil && lastState.Sequence != 0 && state != nil && state.Sequence <= lastState.Sequence {
		if !force {
			log.Warn(state, " already imported")
			return nil
		}
	}

	defer log.StopStep(log.StartStep(fmt.Sprintf("Processing %s", oscFile)))

	elems, errc := parser.Parse(oscFile)

	tagmapping, err := mapping.NewMapping(config.BaseOptions.MappingFile)
	if err != nil {
		return err
	}

	dbConf := database.Config{
		ConnectionParams: config.BaseOptions.Connection,
		Srid:             config.BaseOptions.Srid,
		// we apply diff imports on the Production schema
		ImportSchema:     config.BaseOptions.Schemas.Production,
		ProductionSchema: config.BaseOptions.Schemas.Production,
		BackupSchema:     config.BaseOptions.Schemas.Backup,
	}
	db, err := database.Open(dbConf, tagmapping)
	if err != nil {
		return errors.New("database open: " + err.Error())
	}
	defer db.Close()

	err = db.Begin()
	if err != nil {
		return err
	}

	delDb, ok := db.(database.Deleter)
	if !ok {
		return errors.New("database not deletable")
	}

	genDb, ok := db.(database.Generalizer)
	if ok {
		genDb.EnableGeneralizeUpdates()
	}

	deleter := NewDeleter(
		delDb,
		osmCache,
		diffCache,
		tagmapping.SingleIdSpace,
		tagmapping.PointMatcher(),
		tagmapping.LineStringMatcher(),
		tagmapping.PolygonMatcher(),
	)

	progress := stats.NewStatsReporter()

	relTagFilter := tagmapping.RelationTagFilter()
	wayTagFilter := tagmapping.WayTagFilter()
	nodeTagFilter := tagmapping.NodeTagFilter()

	relations := make(chan *element.Relation)
	ways := make(chan *element.Way)
	nodes := make(chan *element.Node)

	relWriter := writer.NewRelationWriter(osmCache, diffCache,
		tagmapping.SingleIdSpace,
		relations, db, progress,
		tagmapping.PolygonMatcher(), config.BaseOptions.Srid)
	relWriter.SetLimiter(geometryLimiter)
	relWriter.SetExpireor(expireor)
	relWriter.Start()

	wayWriter := writer.NewWayWriter(osmCache, diffCache,
		tagmapping.SingleIdSpace,
		ways, db, progress,
		tagmapping.PolygonMatcher(), tagmapping.LineStringMatcher(),
		config.BaseOptions.Srid)
	wayWriter.SetLimiter(geometryLimiter)
	wayWriter.SetExpireor(expireor)
	wayWriter.Start()

	nodeWriter := writer.NewNodeWriter(osmCache, nodes, db,
		progress, tagmapping.PointMatcher(), config.BaseOptions.Srid)
	nodeWriter.SetLimiter(geometryLimiter)
	nodeWriter.SetExpireor(expireor)
	nodeWriter.Start()

	nodeIds := make(map[int64]bool)
	wayIds := make(map[int64]bool)
	relIds := make(map[int64]bool)

	step := log.StartStep("Parsing changes, updating cache and removing elements")

	g := geos.NewGeos()

For:
	for {
		select {
		case elem := <-elems:
			if elem.Rel != nil {
				relTagFilter.Filter(&elem.Rel.Tags)
				progress.AddRelations(1)
			} else if elem.Way != nil {
				wayTagFilter.Filter(&elem.Way.Tags)
				progress.AddWays(1)
			} else if elem.Node != nil {
				nodeTagFilter.Filter(&elem.Node.Tags)
				if len(elem.Node.Tags) > 0 {
					progress.AddNodes(1)
				}
				progress.AddCoords(1)
			}
			if elem.Del {
				if err := deleter.Delete(elem); err != nil {
					return err
				}
				if !elem.Add {
					// no new or modified elem -> remove from cache
					if elem.Rel != nil {
						if err := osmCache.Relations.DeleteRelation(elem.Rel.Id); err != nil {
							return err
						}
					} else if elem.Way != nil {
						if err := osmCache.Ways.DeleteWay(elem.Way.Id); err != nil {
							return err
						}
						diffCache.Ways.Delete(elem.Way.Id)
					} else if elem.Node != nil {
						if err := osmCache.Nodes.DeleteNode(elem.Node.Id); err != nil {
							return err
						}
						if err := osmCache.Coords.DeleteCoord(elem.Node.Id); err != nil {
							return err
						}
					}
				} else if elem.Node != nil && elem.Node.Tags == nil {
					// handle modifies where a node drops all tags
					if err := osmCache.Nodes.DeleteNode(elem.Node.Id); err != nil {
						return err
					}
				}
			}
			if elem.Add {
				if elem.Rel != nil {
					// check if first member is cached to avoid caching
					// unneeded relations (typical outside of our coverage)
					if osmCache.Ways.FirstMemberIsCached(elem.Rel.Members) {
						osmCache.Relations.PutRelation(elem.Rel)
						relIds[elem.Rel.Id] = true
					}
				} else if elem.Way != nil {
					// check if first coord is cached to avoid caching
					// unneeded ways (typical outside of our coverage)
					if osmCache.Coords.FirstRefIsCached(elem.Way.Refs) {
						osmCache.Ways.PutWay(elem.Way)
						wayIds[elem.Way.Id] = true
					}
				} else if elem.Node != nil {
					addNode := true
					if geometryLimiter != nil {
						nd := element.Node{Long: elem.Node.Long, Lat: elem.Node.Lat}
						proj.NodeToMerc(&nd)
						if !geometryLimiter.IntersectsBuffer(g, nd.Long, nd.Lat) {
							addNode = false
						}
					}
					if addNode {
						osmCache.Nodes.PutNode(elem.Node)
						osmCache.Coords.PutCoords([]element.Node{*elem.Node})
						nodeIds[elem.Node.Id] = true
					}
				}
			}
		case err := <-errc:
			if err != io.EOF {
				return err
			}
			break For
		}
	}

	// mark member ways from deleted relations for re-insert
	for id, _ := range deleter.DeletedMemberWays() {
		wayIds[id] = true
	}

	progress.Stop()
	log.StopStep(step)
	step = log.StartStep("Writing added/modified elements")

	progress = stats.NewStatsReporter()

	// mark depending ways for (re)insert
	for nodeId, _ := range nodeIds {
		dependers := diffCache.Coords.Get(nodeId)
		for _, way := range dependers {
			wayIds[way] = true
		}
	}

	// mark depending relations for (re)insert
	for wayId, _ := range wayIds {
		dependers := diffCache.Ways.Get(wayId)
		for _, rel := range dependers {
			relIds[rel] = true
		}
	}

	for relId, _ := range relIds {
		rel, err := osmCache.Relations.GetRelation(relId)
		if err != nil {
			if err != cache.NotFound {
				log.Print(rel, err)
			}
			continue
		}
		// insert new relation
		progress.AddRelations(1)
		relations <- rel
	}

	for wayId, _ := range wayIds {
		way, err := osmCache.Ways.GetWay(wayId)
		if err != nil {
			if err != cache.NotFound {
				log.Print(way, err)
			}
			continue
		}
		// insert new way
		progress.AddWays(1)
		ways <- way
	}

	for nodeId, _ := range nodeIds {
		node, err := osmCache.Nodes.GetNode(nodeId)
		if err != nil {
			if err != cache.NotFound {
				log.Print(node, err)
			}
			// missing nodes can still be Coords
			// no `continue` here
		}
		if node != nil {
			// insert new node
			progress.AddNodes(1)
			nodes <- node
		}
	}

	close(relations)
	close(ways)
	close(nodes)

	nodeWriter.Wait()
	relWriter.Wait()
	wayWriter.Wait()

	if genDb != nil {
		genDb.GeneralizeUpdates()
	}

	err = db.End()
	if err != nil {
		return err
	}
	err = db.Close()
	if err != nil {
		return err
	}

	log.StopStep(step)
	progress.Stop()

	if state != nil {
		if lastState != nil {
			state.Url = lastState.Url
		}
		err = diffstate.WriteLastState(config.BaseOptions.DiffDir, state)
		if err != nil {
			log.Warn(err) // warn only
		}
	}
	return nil
}
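// ReadPbf reads coords, nodes, ways and relations from a PBF file with
// multiple worker goroutines and stores them in the OSM cache.
// If a limiter is set (-limitto), elements outside the covered area are
// marked with osmcache.SKIP. All coords/nodes are cached before ways are
// read, and all ways before relations, so the FirstRefIsCached and
// FirstMemberIsCached checks see complete data.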
func ReadPbf(cache *osmcache.OSMCache, progress *stats.Statistics,
	tagmapping *mapping.Mapping, pbfFile *pbf.Pbf,
	limiter *limit.Limiter,
) {
	nodes := make(chan []element.Node, 4)
	coords := make(chan []element.Node, 4)
	ways := make(chan []element.Way, 4)
	relations := make(chan []element.Relation, 4)

	withLimiter := false
	if limiter != nil {
		withLimiter = true
	}

	if pbfFile.Header.Time.Unix() != 0 {
		log.Printf("reading %s with data till %v", pbfFile.Filename, pbfFile.Header.Time.Local())
	}

	parser := pbf.NewParser(pbfFile, coords, nodes, ways, relations)

	// wait for all coords/nodes to be processed before continuing with
	// ways. required for -limitto checks
	coordsSync := sync.WaitGroup{}
	parser.FinishedCoords(func() {
		for i := 0; int64(i) < nCoords; i++ {
			coords <- nil
		}
		for i := 0; int64(i) < nNodes; i++ {
			nodes <- nil
		}
		coordsSync.Wait()
	})

	// wait for all ways to be processed before continuing with
	// relations. required for -limitto checks
	waysSync := sync.WaitGroup{}
	parser.FinishedWays(func() {
		for i := 0; int64(i) < nWays; i++ {
			ways <- nil
		}
		waysSync.Wait()
	})

	waitWriter := sync.WaitGroup{}

	for i := 0; int64(i) < nWays; i++ {
		waysSync.Add(1)
		waitWriter.Add(1)
		go func() {
			var skip, hit int
			m := tagmapping.WayTagFilter()
			for ws := range ways {
				if ws == nil {
					waysSync.Done()
					waysSync.Wait()
					continue
				}
				if skipWays {
					continue
				}
				for i, _ := range ws {
					m.Filter(&ws[i].Tags)
					if withLimiter {
						cached, err := cache.Coords.FirstRefIsCached(ws[i].Refs)
						if err != nil {
							log.Errorf("error while checking for cached refs of way %d: %v", ws[i].Id, err)
							cached = true // don't skip in case of error
						}
						if cached {
							hit += 1
						} else {
							ws[i].Id = osmcache.SKIP
							skip += 1
						}
					}
				}
				err := cache.Ways.PutWays(ws)
				if err != nil {
					log.Errorf("error while caching ways: %v", err)
				}
				progress.AddWays(len(ws))
			}
			waitWriter.Done()
		}()
	}

	for i := 0; int64(i) < nRels; i++ {
		waitWriter.Add(1)
		go func() {
			var skip, hit int
			m := tagmapping.RelationTagFilter()
			for rels := range relations {
				numWithTags := 0
				for i, _ := range rels {
					m.Filter(&rels[i].Tags)
					if len(rels[i].Tags) > 0 {
						numWithTags += 1
					}
					if withLimiter {
						cached, err := cache.Ways.FirstMemberIsCached(rels[i].Members)
						if err != nil {
							log.Errorf("error while checking for cached members of relation %d: %v", rels[i].Id, err)
							cached = true // don't skip in case of error
						}
						if cached {
							hit += 1
						} else {
							skip += 1
							rels[i].Id = osmcache.SKIP
						}
					}
				}
				err := cache.Relations.PutRelations(rels)
				if err != nil {
					log.Errorf("error while caching relation: %v", err)
				}
				progress.AddRelations(numWithTags)
			}
			waitWriter.Done()
		}()
	}

	for i := 0; int64(i) < nCoords; i++ {
		coordsSync.Add(1)
		waitWriter.Add(1)
		go func() {
			var skip, hit int
			g := geos.NewGeos()
			defer g.Finish()
			for nds := range coords {
				if nds == nil {
					coordsSync.Done()
					coordsSync.Wait()
					continue
				}
				if withLimiter {
					for i, _ := range nds {
						if !limiter.IntersectsBuffer(g, nds[i].Long, nds[i].Lat) {
							skip += 1
							nds[i].Id = osmcache.SKIP
						} else {
							hit += 1
						}
					}
				}
				cache.Coords.PutCoords(nds)
				progress.AddCoords(len(nds))
			}
			waitWriter.Done()
		}()
	}

	for i := 0; int64(i) < nNodes; i++ {
		coordsSync.Add(1)
		waitWriter.Add(1)
		go func() {
			g := geos.NewGeos()
			defer g.Finish()
			m := tagmapping.NodeTagFilter()
			for nds := range nodes {
				if nds == nil {
					coordsSync.Done()
					coordsSync.Wait()
					continue
				}
				numWithTags := 0
				for i, _ := range nds {
					m.Filter(&nds[i].Tags)
					if len(nds[i].Tags) > 0 {
						numWithTags += 1
					}
					if withLimiter {
						if !limiter.IntersectsBuffer(g, nds[i].Long, nds[i].Lat) {
							nds[i].Id = osmcache.SKIP
						}
					}
				}
				cache.Nodes.PutNodes(nds)
				progress.AddNodes(numWithTags)
			}
			waitWriter.Done()
		}()
	}

	parser.Parse()
	waitWriter.Wait()
}
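// This variant of ReadPbf synchronizes the import phases with util.SyncPoint
// barriers instead of sync.WaitGroup and projects coordinates to Mercator
// (proj.NodeToMerc) before the -limitto intersection test.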
func ReadPbf(cache *osmcache.OSMCache, progress *stats.Statistics,
	tagmapping *mapping.Mapping, pbfFile *pbf.Pbf,
	limiter *limit.Limiter,
) {
	nodes := make(chan []element.Node, 4)
	coords := make(chan []element.Node, 4)
	ways := make(chan []element.Way, 4)
	relations := make(chan []element.Relation, 4)

	withLimiter := false
	if limiter != nil {
		withLimiter = true
	}

	if pbfFile.Header.Time.Unix() != 0 {
		log.Printf("reading %s with data till %v", pbfFile.Filename, pbfFile.Header.Time.Local())
	}

	parser := pbf.NewParser(pbfFile, coords, nodes, ways, relations)

	coordsSynced := make(chan bool)
	coordsSync := util.NewSyncPoint(int(nCoords+nNodes), func() {
		coordsSynced <- true
	})
	parser.NotifyWays(func() {
		for i := 0; int64(i) < nCoords; i++ {
			coords <- nil
		}
		for i := 0; int64(i) < nNodes; i++ {
			nodes <- nil
		}
		<-coordsSynced
	})

	waysSynced := make(chan bool)
	waysSync := util.NewSyncPoint(int(nWays), func() {
		waysSynced <- true
	})
	parser.NotifyRelations(func() {
		for i := 0; int64(i) < nWays; i++ {
			ways <- nil
		}
		<-waysSynced
	})

	parser.Start()

	waitWriter := sync.WaitGroup{}

	for i := 0; int64(i) < nWays; i++ {
		waitWriter.Add(1)
		go func() {
			var skip, hit int
			m := tagmapping.WayTagFilter()
			for ws := range ways {
				if ws == nil {
					waysSync.Sync()
					continue
				}
				if skipWays {
					continue
				}
				for i, _ := range ws {
					m.Filter(&ws[i].Tags)
					if withLimiter {
						if !cache.Coords.FirstRefIsCached(ws[i].Refs) {
							ws[i].Id = osmcache.SKIP
							skip += 1
						} else {
							hit += 1
						}
					}
				}
				cache.Ways.PutWays(ws)
				progress.AddWays(len(ws))
			}
			waitWriter.Done()
		}()
	}

	for i := 0; int64(i) < nRels; i++ {
		waitWriter.Add(1)
		go func() {
			var skip, hit int
			m := tagmapping.RelationTagFilter()
			for rels := range relations {
				numWithTags := 0
				for i, _ := range rels {
					m.Filter(&rels[i].Tags)
					if len(rels[i].Tags) > 0 {
						numWithTags += 1
					}
					if withLimiter {
						if !cache.Ways.FirstMemberIsCached(rels[i].Members) {
							skip += 1
							rels[i].Id = osmcache.SKIP
						} else {
							hit += 1
						}
					}
				}
				cache.Relations.PutRelations(rels)
				progress.AddRelations(numWithTags)
			}
			waitWriter.Done()
		}()
	}

	for i := 0; int64(i) < nCoords; i++ {
		waitWriter.Add(1)
		go func() {
			var skip, hit int
			g := geos.NewGeos()
			defer g.Finish()
			for nds := range coords {
				if nds == nil {
					coordsSync.Sync()
					continue
				}
				if withLimiter {
					for i, _ := range nds {
						nd := element.Node{Long: nds[i].Long, Lat: nds[i].Lat}
						proj.NodeToMerc(&nd)
						if !limiter.IntersectsBuffer(g, nd.Long, nd.Lat) {
							skip += 1
							nds[i].Id = osmcache.SKIP
						} else {
							hit += 1
						}
					}
				}
				cache.Coords.PutCoords(nds)
				progress.AddCoords(len(nds))
			}
			waitWriter.Done()
		}()
	}

	for i := 0; int64(i) < nNodes; i++ {
		waitWriter.Add(1)
		go func() {
			g := geos.NewGeos()
			defer g.Finish()
			m := tagmapping.NodeTagFilter()
			for nds := range nodes {
				if nds == nil {
					coordsSync.Sync()
					continue
				}
				numWithTags := 0
				for i, _ := range nds {
					m.Filter(&nds[i].Tags)
					if len(nds[i].Tags) > 0 {
						numWithTags += 1
					}
					if withLimiter {
						nd := element.Node{Long: nds[i].Long, Lat: nds[i].Lat}
						proj.NodeToMerc(&nd)
						if !limiter.IntersectsBuffer(g, nd.Long, nd.Lat) {
							nds[i].Id = osmcache.SKIP
						}
					}
				}
				cache.Nodes.PutNodes(nds)
				progress.AddNodes(numWithTags)
			}
			waitWriter.Done()
		}()
	}

	parser.Close()
	close(relations)
	close(ways)
	close(nodes)
	close(coords)
	waitWriter.Wait()
}
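// Both ReadPbf variants rely on the same phase-synchronization pattern: when
// the parser finishes one element type, it pushes one nil sentinel per worker
// goroutine into the channel; every worker that receives nil parks at a
// barrier until all workers of that phase have drained their queue, and only
// then does the parser continue with the next element type.
// Below is a minimal, self-contained sketch of that pattern (hypothetical
// names, plain sync.WaitGroup instead of imposm's util.SyncPoint); it is an
// illustration of the technique, not part of the importer itself.
package main

import (
	"fmt"
	"sync"
)

func main() {
	const nWorkers = 4
	work := make(chan []int, 4)

	// barrier is released once every worker has seen its nil sentinel
	barrier := sync.WaitGroup{}
	barrier.Add(nWorkers)

	done := sync.WaitGroup{}
	for w := 0; w < nWorkers; w++ {
		done.Add(1)
		go func(id int) {
			defer done.Done()
			for batch := range work {
				if batch == nil {
					// sentinel: phase one is over for this worker,
					// wait until all other workers reach this point
					barrier.Done()
					barrier.Wait()
					continue
				}
				fmt.Printf("worker %d processed %d items\n", id, len(batch))
			}
		}(w)
	}

	// phase one: real batches
	for i := 0; i < 10; i++ {
		work <- []int{i, i + 1, i + 2}
	}
	// end of phase one: one sentinel per worker; once the barrier falls,
	// phase-two batches could safely rely on phase-one results
	for w := 0; w < nWorkers; w++ {
		work <- nil
	}

	close(work)
	done.Wait()
}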