// Import runs a full OSM import driven entirely by the package-level config:
// optionally read a PBF into the local element cache, optionally write the
// cached elements into the database, then run the requested post-steps
// (optimize, deploy, revert-deploy, remove-backup). Any failure terminates
// the process via log.Fatal.
func Import() {
	if config.BaseOptions.Quiet {
		logging.SetQuiet(true)
	}
	// Reject option combinations that cannot be satisfied in one run.
	if (config.ImportOptions.Write || config.ImportOptions.Read != "") && (config.ImportOptions.RevertDeploy || config.ImportOptions.RemoveBackup) {
		log.Fatal("-revertdeploy and -removebackup not compatible with -read/-write")
	}
	if config.ImportOptions.RevertDeploy && (config.ImportOptions.RemoveBackup || config.ImportOptions.DeployProduction) {
		log.Fatal("-revertdeploy not compatible with -deployproduction/-removebackup")
	}
	// Optional geometry limiter: only loaded when reading/writing with a
	// -limitto geometry configured.
	var geometryLimiter *limit.Limiter
	if (config.ImportOptions.Write || config.ImportOptions.Read != "") && config.BaseOptions.LimitTo != "" {
		var err error
		step := log.StartStep("Reading limitto geometries")
		geometryLimiter, err = limit.NewFromGeoJsonWithBuffered(
			config.BaseOptions.LimitTo,
			config.BaseOptions.LimitToCacheBuffer,
		)
		if err != nil {
			log.Fatal(err)
		}
		log.StopStep(step)
	}
	tagmapping, err := mapping.NewMapping(config.BaseOptions.MappingFile)
	if err != nil {
		log.Fatal("mapping file: ", err)
	}
	// Open the database only for steps that actually need it.
	var db database.DB
	if config.ImportOptions.Write || config.ImportOptions.DeployProduction || config.ImportOptions.RevertDeploy || config.ImportOptions.RemoveBackup || config.ImportOptions.Optimize {
		if config.BaseOptions.Connection == "" {
			log.Fatal("missing connection option")
		}
		conf := database.Config{
			ConnectionParams: config.BaseOptions.Connection,
			Srid:             config.BaseOptions.Srid,
			ImportSchema:     config.BaseOptions.Schemas.Import,
			ProductionSchema: config.BaseOptions.Schemas.Production,
			BackupSchema:     config.BaseOptions.Schemas.Backup,
		}
		db, err = database.Open(conf, tagmapping)
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()
	}
	osmCache := cache.NewOSMCache(config.BaseOptions.CacheDir)
	// Guard against clobbering an existing cache unless the user opted in.
	if config.ImportOptions.Read != "" && osmCache.Exists() {
		if config.ImportOptions.Overwritecache {
			log.Printf("removing existing cache %s", config.BaseOptions.CacheDir)
			err := osmCache.Remove()
			if err != nil {
				log.Fatal("unable to remove cache:", err)
			}
		} else if !config.ImportOptions.Appendcache {
			log.Fatal("cache already exists use -appendcache or -overwritecache")
		}
	}
	step := log.StartStep("Imposm")
	var elementCounts *stats.ElementCounts
	// Phase 1: read the PBF into the on-disk element cache.
	if config.ImportOptions.Read != "" {
		step := log.StartStep("Reading OSM data")
		err = osmCache.Open()
		if err != nil {
			log.Fatal(err)
		}
		progress := stats.NewStatsReporter()
		pbfFile, err := pbf.Open(config.ImportOptions.Read)
		if err != nil {
			log.Fatal(err)
		}
		osmCache.Coords.SetLinearImport(true)
		// Only limit during the read phase when a cache buffer is set;
		// otherwise limiting happens later during the write phase.
		readLimiter := geometryLimiter
		if config.BaseOptions.LimitToCacheBuffer == 0.0 {
			readLimiter = nil
		}
		reader.ReadPbf(osmCache, progress, tagmapping, pbfFile, readLimiter)
		osmCache.Coords.SetLinearImport(false)
		elementCounts = progress.Stop()
		osmCache.Close()
		log.StopStep(step)
		// Persist the PBF's replication state so diff updates know where
		// to resume. Failure to write it is non-fatal.
		diffstate := state.FromPbf(pbfFile)
		if diffstate != nil {
			os.MkdirAll(config.BaseOptions.DiffDir, 0755)
			err := diffstate.WriteToFile(path.Join(config.BaseOptions.DiffDir, "last.state.txt"))
			if err != nil {
				log.Print("error writing last.state.txt: ", err)
			}
		}
	}
	// Phase 2: write cached elements into the database.
	if config.ImportOptions.Write {
		stepImport := log.StartStep("Importing OSM data")
		stepWrite := log.StartStep("Writing OSM data")
		progress := stats.NewStatsReporterWithEstimate(elementCounts)
		err = db.Init()
		if err != nil {
			log.Fatal(err)
		}
		// Prefer a bulk transaction when the backend supports it.
		bulkDb, ok := db.(database.BulkBeginner)
		if ok {
			err = bulkDb.BeginBulk()
		} else {
			err = db.Begin()
		}
		if err != nil {
			log.Fatal(err)
		}
		// The diff cache records element dependencies; only needed when
		// this import should later support diff updates.
		var diffCache *cache.DiffCache
		if config.ImportOptions.Diff {
			diffCache = cache.NewDiffCache(config.BaseOptions.CacheDir)
			if err = diffCache.Remove(); err != nil {
				log.Fatal(err)
			}
			if err = diffCache.Open(); err != nil {
				log.Fatal(err)
			}
		}
		err = osmCache.Open()
		if err != nil {
			log.Fatal(err)
		}
		if diffCache != nil {
			diffCache.Coords.SetLinearImport(true)
			diffCache.Ways.SetLinearImport(true)
		}
		osmCache.Coords.SetReadOnly(true)
		// Relations first, then ways, then nodes: each pass may mark
		// members/refs as inserted for the following passes.
		relations := osmCache.Relations.Iter()
		relWriter := writer.NewRelationWriter(osmCache, diffCache, relations, db, progress, config.BaseOptions.Srid)
		relWriter.SetLimiter(geometryLimiter)
		relWriter.EnableConcurrent()
		relWriter.Start()
		relWriter.Wait() // blocks till the Relations.Iter() finishes
		osmCache.Relations.Close()
		ways := osmCache.Ways.Iter()
		wayWriter := writer.NewWayWriter(osmCache, diffCache, ways, db, progress, config.BaseOptions.Srid)
		wayWriter.SetLimiter(geometryLimiter)
		wayWriter.EnableConcurrent()
		wayWriter.Start()
		wayWriter.Wait() // blocks till the Ways.Iter() finishes
		osmCache.Ways.Close()
		nodes := osmCache.Nodes.Iter()
		nodeWriter := writer.NewNodeWriter(osmCache, nodes, db, progress, config.BaseOptions.Srid)
		nodeWriter.SetLimiter(geometryLimiter)
		nodeWriter.EnableConcurrent()
		nodeWriter.Start()
		nodeWriter.Wait() // blocks till the Nodes.Iter() finishes
		osmCache.Close()
		err = db.End()
		if err != nil {
			log.Fatal(err)
		}
		progress.Stop()
		if config.ImportOptions.Diff {
			diffCache.Close()
		}
		log.StopStep(stepWrite)
		// Post-write steps; each `db` below shadows the outer db with the
		// asserted capability interface.
		if db, ok := db.(database.Generalizer); ok {
			if err := db.Generalize(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not generalizeable")
		}
		if db, ok := db.(database.Finisher); ok {
			if err := db.Finish(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not finishable")
		}
		log.StopStep(stepImport)
	}
	if config.ImportOptions.Optimize {
		if db, ok := db.(database.Optimizer); ok {
			if err := db.Optimize(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not optimizable")
		}
	}
	if config.ImportOptions.DeployProduction {
		if db, ok := db.(database.Deployer); ok {
			if err := db.Deploy(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not deployable")
		}
	}
	if config.ImportOptions.RevertDeploy {
		if db, ok := db.(database.Deployer); ok {
			if err := db.RevertDeploy(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not deployable")
		}
	}
	if config.ImportOptions.RemoveBackup {
		if db, ok := db.(database.Deployer); ok {
			if err := db.RemoveBackup(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not deployable")
		}
	}
	log.StopStep(step)
}
func mainimport() { if config.ImportOptions.Cpuprofile != "" { f, err := os.Create(config.ImportOptions.Cpuprofile) if err != nil { golog.Fatal(err) } pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() } if config.ImportOptions.Httpprofile != "" { stats.StartHttpPProf(config.ImportOptions.Httpprofile) } if config.ImportOptions.Memprofile != "" { parts := strings.Split(config.ImportOptions.Memprofile, string(os.PathListSeparator)) var interval time.Duration if len(parts) < 2 { interval, _ = time.ParseDuration("1m") } else { var err error interval, err = time.ParseDuration(parts[1]) if err != nil { golog.Fatal(err) } } go stats.MemProfiler(parts[0], interval) } if config.ImportOptions.Quiet { logging.SetQuiet(true) } if (config.ImportOptions.Write || config.ImportOptions.Read != "") && (config.ImportOptions.RevertDeploy || config.ImportOptions.RemoveBackup) { log.Fatal("-revertdeploy and -removebackup not compatible with -read/-write") } if config.ImportOptions.RevertDeploy && (config.ImportOptions.RemoveBackup || config.ImportOptions.DeployProduction) { log.Fatal("-revertdeploy not compatible with -deployproduction/-removebackup") } var geometryLimiter *limit.Limiter if config.ImportOptions.Write && config.BaseOptions.LimitTo != "" { var err error step := log.StartStep("Reading limitto geometries") geometryLimiter, err = limit.NewFromOgrSource(config.BaseOptions.LimitTo) if err != nil { log.Fatal(err) } log.StopStep(step) } tagmapping, err := mapping.NewMapping(config.BaseOptions.MappingFile) if err != nil { log.Fatal("mapping file: ", err) } var db database.DB if config.ImportOptions.Write || config.ImportOptions.DeployProduction || config.ImportOptions.RevertDeploy || config.ImportOptions.RemoveBackup || config.ImportOptions.Optimize { if config.BaseOptions.Connection == "" { log.Fatal("missing connection option") } connType := database.ConnectionType(config.BaseOptions.Connection) conf := database.Config{ Type: connType, ConnectionParams: 
config.BaseOptions.Connection, Srid: config.BaseOptions.Srid, } db, err = database.Open(conf, tagmapping) if err != nil { log.Fatal(err) } } osmCache := cache.NewOSMCache(config.BaseOptions.CacheDir) if config.ImportOptions.Read != "" && osmCache.Exists() { if config.ImportOptions.Overwritecache { log.Printf("removing existing cache %s", config.BaseOptions.CacheDir) err := osmCache.Remove() if err != nil { log.Fatal("unable to remove cache:", err) } } else if !config.ImportOptions.Appendcache { log.Fatal("cache already exists use -appendcache or -overwritecache") } } step := log.StartStep("Imposm") var elementCounts *stats.ElementCounts if config.ImportOptions.Read != "" { step := log.StartStep("Reading OSM data") err = osmCache.Open() if err != nil { log.Fatal(err) } progress := stats.NewStatsReporter() pbfFile, err := pbf.Open(config.ImportOptions.Read) if err != nil { log.Fatal(err) } osmCache.Coords.SetLinearImport(true) reader.ReadPbf(osmCache, progress, tagmapping, pbfFile) osmCache.Coords.SetLinearImport(false) elementCounts = progress.Stop() osmCache.Close() log.StopStep(step) if config.ImportOptions.Diff { diffstate := state.FromPbf(pbfFile) if diffstate != nil { diffstate.WriteToFile(path.Join(config.BaseOptions.CacheDir, "last.state.txt")) } } } if config.ImportOptions.Write { stepImport := log.StartStep("Importing OSM data") stepWrite := log.StartStep("Writing OSM data") progress := stats.NewStatsReporterWithEstimate(elementCounts) err = db.Init() if err != nil { log.Fatal(err) } bulkDb, ok := db.(database.BulkBeginner) if ok { err = bulkDb.BeginBulk() } else { err = db.Begin() } if err != nil { log.Fatal(err) } var diffCache *cache.DiffCache if config.ImportOptions.Diff { diffCache = cache.NewDiffCache(config.BaseOptions.CacheDir) if err = diffCache.Remove(); err != nil { log.Fatal(err) } if err = diffCache.Open(); err != nil { log.Fatal(err) } } err = osmCache.Open() if err != nil { log.Fatal(err) } if diffCache != nil { 
diffCache.Coords.SetLinearImport(true) diffCache.Ways.SetLinearImport(true) } osmCache.Coords.SetReadOnly(true) pointsTagMatcher := tagmapping.PointMatcher() lineStringsTagMatcher := tagmapping.LineStringMatcher() polygonsTagMatcher := tagmapping.PolygonMatcher() relations := osmCache.Relations.Iter() relWriter := writer.NewRelationWriter(osmCache, diffCache, relations, db, polygonsTagMatcher, progress, config.BaseOptions.Srid) relWriter.SetLimiter(geometryLimiter) relWriter.Start() // blocks till the Relations.Iter() finishes relWriter.Close() osmCache.Relations.Close() ways := osmCache.Ways.Iter() wayWriter := writer.NewWayWriter(osmCache, diffCache, ways, db, lineStringsTagMatcher, polygonsTagMatcher, progress, config.BaseOptions.Srid) wayWriter.SetLimiter(geometryLimiter) wayWriter.Start() // blocks till the Ways.Iter() finishes wayWriter.Close() osmCache.Ways.Close() nodes := osmCache.Nodes.Iter() nodeWriter := writer.NewNodeWriter(osmCache, nodes, db, pointsTagMatcher, progress, config.BaseOptions.Srid) nodeWriter.SetLimiter(geometryLimiter) nodeWriter.Start() // blocks till the Nodes.Iter() finishes nodeWriter.Close() osmCache.Close() err = db.End() if err != nil { log.Fatal(err) } progress.Stop() if config.ImportOptions.Diff { diffCache.Close() } log.StopStep(stepWrite) if db, ok := db.(database.Generalizer); ok { if err := db.Generalize(); err != nil { log.Fatal(err) } } else { log.Fatal("database not generalizeable") } if db, ok := db.(database.Finisher); ok { if err := db.Finish(); err != nil { log.Fatal(err) } } else { log.Fatal("database not finishable") } log.StopStep(stepImport) } if config.ImportOptions.Optimize { if db, ok := db.(database.Optimizer); ok { if err := db.Optimize(); err != nil { log.Fatal(err) } } else { log.Fatal("database not optimizable") } } if config.ImportOptions.DeployProduction { if db, ok := db.(database.Deployer); ok { if err := db.Deploy(); err != nil { log.Fatal(err) } } else { log.Fatal("database not deployable") } } if 
config.ImportOptions.RevertDeploy { if db, ok := db.(database.Deployer); ok { if err := db.RevertDeploy(); err != nil { log.Fatal(err) } } else { log.Fatal("database not deployable") } } if config.ImportOptions.RemoveBackup { if db, ok := db.(database.Deployer); ok { if err := db.RemoveBackup(); err != nil { log.Fatal(err) } } else { log.Fatal("database not deployable") } } log.StopStep(step) }
// Update applies one OSM change file (.osc) to the cache and database:
// it deletes removed/modified elements, caches added/modified ones, then
// re-inserts them together with all elements that depend on them. Failures
// terminate the process via log.Fatal. When the change file's sequence is
// not newer than the last imported state, the update is skipped unless
// force is set.
func Update(oscFile string, geometryLimiter *limit.Limiter, force bool) {
	state, err := diffstate.ParseFromOsc(oscFile)
	if err != nil {
		log.Fatal(err)
	}
	lastState, err := diffstate.ParseLastState(config.BaseOptions.CacheDir)
	if err != nil {
		log.Fatal(err)
	}
	// Skip already-imported change files unless forced.
	if lastState != nil && lastState.Sequence != 0 && state != nil && state.Sequence <= lastState.Sequence {
		if !force {
			log.Warn(state, " already imported")
			return
		}
	}
	defer log.StopStep(log.StartStep(fmt.Sprintf("Processing %s", oscFile)))
	// Parser streams elements/errors on channels while we consume below.
	elems, errc := parser.Parse(oscFile)
	osmCache := cache.NewOSMCache(config.BaseOptions.CacheDir)
	err = osmCache.Open()
	if err != nil {
		log.Fatal("osm cache: ", err)
	}
	diffCache := cache.NewDiffCache(config.BaseOptions.CacheDir)
	err = diffCache.Open()
	if err != nil {
		log.Fatal("diff cache: ", err)
	}
	tagmapping, err := mapping.NewMapping(config.BaseOptions.MappingFile)
	if err != nil {
		log.Fatal(err)
	}
	connType := database.ConnectionType(config.BaseOptions.Connection)
	dbConf := database.Config{
		Type:             connType,
		ConnectionParams: config.BaseOptions.Connection,
		Srid:             config.BaseOptions.Srid,
	}
	db, err := database.Open(dbConf, tagmapping)
	if err != nil {
		log.Fatal("database open: ", err)
	}
	err = db.Begin()
	if err != nil {
		log.Fatal(err)
	}
	delDb, ok := db.(database.Deleter)
	if !ok {
		log.Fatal("database not deletable")
	}
	deleter := NewDeleter(
		delDb,
		osmCache,
		diffCache,
		tagmapping.PointMatcher(),
		tagmapping.LineStringMatcher(),
		tagmapping.PolygonMatcher(),
	)
	progress := stats.NewStatsReporter()
	// Collect expired tiles at zoom level 14 for later export.
	expiredTiles := expire.NewTiles(14)
	relTagFilter := tagmapping.RelationTagFilter()
	wayTagFilter := tagmapping.WayTagFilter()
	nodeTagFilter := tagmapping.NodeTagFilter()
	pointsTagMatcher := tagmapping.PointMatcher()
	lineStringsTagMatcher := tagmapping.LineStringMatcher()
	polygonsTagMatcher := tagmapping.PolygonMatcher()
	// Writers consume these channels concurrently; elements are fed in
	// after the parse phase below.
	relations := make(chan *element.Relation)
	ways := make(chan *element.Way)
	nodes := make(chan *element.Node)
	relWriter := writer.NewRelationWriter(osmCache, diffCache, relations, db, polygonsTagMatcher, progress, config.BaseOptions.Srid)
	relWriter.SetLimiter(geometryLimiter)
	relWriter.SetExpireTiles(expiredTiles)
	relWriter.Start()
	wayWriter := writer.NewWayWriter(osmCache, diffCache, ways, db, lineStringsTagMatcher, polygonsTagMatcher, progress, config.BaseOptions.Srid)
	wayWriter.SetLimiter(geometryLimiter)
	wayWriter.SetExpireTiles(expiredTiles)
	wayWriter.Start()
	nodeWriter := writer.NewNodeWriter(osmCache, nodes, db, pointsTagMatcher, progress, config.BaseOptions.Srid)
	nodeWriter.SetLimiter(geometryLimiter)
	nodeWriter.Start()
	// Ids of added/modified elements; used below to (re)insert them and
	// everything that depends on them.
	nodeIds := make(map[int64]bool)
	wayIds := make(map[int64]bool)
	relIds := make(map[int64]bool)
	step := log.StartStep("Parsing changes, updating cache and removing elements")
	g := geos.NewGeos()
	// Phase 1: drain the parser; delete and/or cache each change element.
For:
	for {
		select {
		case elem := <-elems:
			if elem.Rel != nil {
				relTagFilter.Filter(&elem.Rel.Tags)
				progress.AddRelations(1)
			} else if elem.Way != nil {
				wayTagFilter.Filter(&elem.Way.Tags)
				progress.AddWays(1)
			} else if elem.Node != nil {
				nodeTagFilter.Filter(&elem.Node.Tags)
				if len(elem.Node.Tags) > 0 {
					progress.AddNodes(1)
				}
				progress.AddCoords(1)
			}
			// Delete first; a modify is Del+Add, a pure delete also
			// removes the element from the caches.
			if elem.Del {
				deleter.Delete(elem)
				if !elem.Add {
					if elem.Rel != nil {
						if err := osmCache.Relations.DeleteRelation(elem.Rel.Id); err != nil {
							log.Fatal(err)
						}
					} else if elem.Way != nil {
						if err := osmCache.Ways.DeleteWay(elem.Way.Id); err != nil {
							log.Fatal(err)
						}
						diffCache.Ways.Delete(elem.Way.Id)
					} else if elem.Node != nil {
						if err := osmCache.Nodes.DeleteNode(elem.Node.Id); err != nil {
							log.Fatal(err)
						}
						if err := osmCache.Coords.DeleteCoord(elem.Node.Id); err != nil {
							log.Fatal(err)
						}
					}
				}
			}
			if elem.Add {
				if elem.Rel != nil {
					// check if first member is cached to avoid caching
					// unneeded relations (typical outside of our coverage)
					if memberIsCached(elem.Rel.Members, osmCache.Ways) {
						osmCache.Relations.PutRelation(elem.Rel)
						relIds[elem.Rel.Id] = true
					}
				} else if elem.Way != nil {
					// check if first coord is cached to avoid caching
					// unneeded ways (typical outside of our coverage)
					if coordIsCached(elem.Way.Refs, osmCache.Coords) {
						osmCache.Ways.PutWay(elem.Way)
						wayIds[elem.Way.Id] = true
					}
				} else if elem.Node != nil {
					// Nodes are limited directly by the (buffered)
					// limitto geometry, if any.
					if geometryLimiter == nil || geometryLimiter.IntersectsBuffer(g, elem.Node.Long, elem.Node.Lat) {
						osmCache.Nodes.PutNode(elem.Node)
						osmCache.Coords.PutCoords([]element.Node{*elem.Node})
						nodeIds[elem.Node.Id] = true
					}
				}
			}
		case err := <-errc:
			// Parser signals completion with io.EOF.
			if err != io.EOF {
				log.Fatal(err)
			}
			break For
		}
	}
	progress.Stop()
	log.StopStep(step)
	step = log.StartStep("Writing added/modified elements")
	progress = stats.NewStatsReporter()
	// Phase 2: feed added/modified elements (and their dependers) to the
	// writers. Order: nodes expand to ways, ways expand to relations.
	for nodeId, _ := range nodeIds {
		node, err := osmCache.Nodes.GetNode(nodeId)
		if err != nil {
			if err != cache.NotFound {
				log.Print(node, err)
			}
			// missing nodes can still be Coords
			// no `continue` here
		}
		if node != nil {
			// insert new node
			nodes <- node
		}
		dependers := diffCache.Coords.Get(nodeId)
		// mark depending ways for (re)insert
		for _, way := range dependers {
			wayIds[way] = true
		}
	}
	for wayId, _ := range wayIds {
		way, err := osmCache.Ways.GetWay(wayId)
		if err != nil {
			if err != cache.NotFound {
				log.Print(way, err)
			}
			continue
		}
		// insert new way
		ways <- way
		dependers := diffCache.Ways.Get(wayId)
		// mark depending relations for (re)insert
		for _, rel := range dependers {
			relIds[rel] = true
		}
	}
	for relId, _ := range relIds {
		rel, err := osmCache.Relations.GetRelation(relId)
		if err != nil {
			if err != cache.NotFound {
				log.Print(rel, err)
			}
			continue
		}
		// insert new relation
		relations <- rel
	}
	// Closing the channels lets the writers drain and finish.
	close(relations)
	close(ways)
	close(nodes)
	nodeWriter.Close()
	relWriter.Close()
	wayWriter.Close()
	err = db.End()
	if err != nil {
		log.Fatal(err)
	}
	err = db.Close()
	if err != nil {
		log.Fatal(err)
	}
	osmCache.Close()
	diffCache.Close()
	log.StopStep(step)
	step = log.StartStep("Updating expired tiles db")
	// NOTE(review): hard-coded output path — presumably intended to become
	// configurable; confirm before relying on it.
	expire.WriteTileExpireDb(
		expiredTiles.SortedTiles(),
		"/tmp/expire_tiles.db",
	)
	log.StopStep(step)
	progress.Stop()
	// Record the new state so the next run can skip this change file.
	if state != nil {
		err = diffstate.WriteLastState(config.BaseOptions.CacheDir, state)
		if err != nil {
			log.Warn(err) // warn only
		}
	}
}
// Update applies one OSM change file (.osc) to the given caches and the
// production database schema: it deletes removed/modified elements, caches
// added/modified ones, then re-inserts them together with all elements that
// depend on them. Errors are returned instead of terminating the process.
// When the change file's sequence is not newer than the last imported
// state, the update is skipped (returns nil) unless force is set.
func Update(oscFile string, geometryLimiter *limit.Limiter, expireor expire.Expireor, osmCache *cache.OSMCache, diffCache *cache.DiffCache, force bool) error {
	state, err := diffstate.ParseFromOsc(oscFile)
	if err != nil {
		return err
	}
	lastState, err := diffstate.ParseLastState(config.BaseOptions.DiffDir)
	if err != nil {
		// A missing/unreadable last state is not fatal; we just import.
		log.Warn(err)
	}
	// Skip already-imported change files unless forced.
	if lastState != nil && lastState.Sequence != 0 && state != nil && state.Sequence <= lastState.Sequence {
		if !force {
			log.Warn(state, " already imported")
			return nil
		}
	}
	defer log.StopStep(log.StartStep(fmt.Sprintf("Processing %s", oscFile)))
	// Parser streams elements/errors on channels while we consume below.
	elems, errc := parser.Parse(oscFile)
	tagmapping, err := mapping.NewMapping(config.BaseOptions.MappingFile)
	if err != nil {
		return err
	}
	dbConf := database.Config{
		ConnectionParams: config.BaseOptions.Connection,
		Srid:             config.BaseOptions.Srid,
		// we apply diff imports on the Production schema
		ImportSchema:     config.BaseOptions.Schemas.Production,
		ProductionSchema: config.BaseOptions.Schemas.Production,
		BackupSchema:     config.BaseOptions.Schemas.Backup,
	}
	db, err := database.Open(dbConf, tagmapping)
	if err != nil {
		return errors.New("database open: " + err.Error())
	}
	// NOTE(review): db.Close() is also called explicitly near the end, so
	// this defer may close a second time — confirm Close is idempotent.
	defer db.Close()
	err = db.Begin()
	if err != nil {
		return err
	}
	delDb, ok := db.(database.Deleter)
	if !ok {
		return errors.New("database not deletable")
	}
	// Generalization during updates is optional, depending on the backend.
	genDb, ok := db.(database.Generalizer)
	if ok {
		genDb.EnableGeneralizeUpdates()
	}
	deleter := NewDeleter(
		delDb,
		osmCache,
		diffCache,
		tagmapping.PointMatcher(),
		tagmapping.LineStringMatcher(),
		tagmapping.PolygonMatcher(),
	)
	progress := stats.NewStatsReporter()
	relTagFilter := tagmapping.RelationTagFilter()
	wayTagFilter := tagmapping.WayTagFilter()
	nodeTagFilter := tagmapping.NodeTagFilter()
	// Writers consume these channels concurrently; elements are fed in
	// after the parse phase below.
	relations := make(chan *element.Relation)
	ways := make(chan *element.Way)
	nodes := make(chan *element.Node)
	relWriter := writer.NewRelationWriter(osmCache, diffCache, relations, db, progress, config.BaseOptions.Srid)
	relWriter.SetLimiter(geometryLimiter)
	relWriter.SetExpireor(expireor)
	relWriter.Start()
	wayWriter := writer.NewWayWriter(osmCache, diffCache, ways, db, progress, config.BaseOptions.Srid)
	wayWriter.SetLimiter(geometryLimiter)
	wayWriter.SetExpireor(expireor)
	wayWriter.Start()
	nodeWriter := writer.NewNodeWriter(osmCache, nodes, db, progress, config.BaseOptions.Srid)
	nodeWriter.SetLimiter(geometryLimiter)
	nodeWriter.SetExpireor(expireor)
	nodeWriter.Start()
	// Ids of added/modified elements; used below to (re)insert them and
	// everything that depends on them.
	nodeIds := make(map[int64]bool)
	wayIds := make(map[int64]bool)
	relIds := make(map[int64]bool)
	step := log.StartStep("Parsing changes, updating cache and removing elements")
	g := geos.NewGeos()
	// Phase 1: drain the parser; delete and/or cache each change element.
For:
	for {
		select {
		case elem := <-elems:
			if elem.Rel != nil {
				relTagFilter.Filter(&elem.Rel.Tags)
				progress.AddRelations(1)
			} else if elem.Way != nil {
				wayTagFilter.Filter(&elem.Way.Tags)
				progress.AddWays(1)
			} else if elem.Node != nil {
				nodeTagFilter.Filter(&elem.Node.Tags)
				if len(elem.Node.Tags) > 0 {
					progress.AddNodes(1)
				}
				progress.AddCoords(1)
			}
			// Delete first; a modify is Del+Add, a pure delete also
			// removes the element from the caches.
			if elem.Del {
				if err := deleter.Delete(elem); err != nil {
					return err
				}
				if !elem.Add {
					if elem.Rel != nil {
						if err := osmCache.Relations.DeleteRelation(elem.Rel.Id); err != nil {
							return err
						}
					} else if elem.Way != nil {
						if err := osmCache.Ways.DeleteWay(elem.Way.Id); err != nil {
							return err
						}
						diffCache.Ways.Delete(elem.Way.Id)
					} else if elem.Node != nil {
						if err := osmCache.Nodes.DeleteNode(elem.Node.Id); err != nil {
							return err
						}
						if err := osmCache.Coords.DeleteCoord(elem.Node.Id); err != nil {
							return err
						}
					}
				}
			}
			if elem.Add {
				if elem.Rel != nil {
					// check if first member is cached to avoid caching
					// unneeded relations (typical outside of our coverage)
					if osmCache.Ways.FirstMemberIsCached(elem.Rel.Members) {
						osmCache.Relations.PutRelation(elem.Rel)
						relIds[elem.Rel.Id] = true
					}
				} else if elem.Way != nil {
					// check if first coord is cached to avoid caching
					// unneeded ways (typical outside of our coverage)
					if osmCache.Coords.FirstRefIsCached(elem.Way.Refs) {
						osmCache.Ways.PutWay(elem.Way)
						wayIds[elem.Way.Id] = true
					}
				} else if elem.Node != nil {
					// Nodes are tested against the limiter in Mercator
					// coordinates; a copy is projected so the original
					// stays in source coordinates.
					addNode := true
					if geometryLimiter != nil {
						nd := element.Node{Long: elem.Node.Long, Lat: elem.Node.Lat}
						proj.NodeToMerc(&nd)
						if !geometryLimiter.IntersectsBuffer(g, nd.Long, nd.Lat) {
							addNode = false
						}
					}
					if addNode {
						osmCache.Nodes.PutNode(elem.Node)
						osmCache.Coords.PutCoords([]element.Node{*elem.Node})
						nodeIds[elem.Node.Id] = true
					}
				}
			}
		case err := <-errc:
			// Parser signals completion with io.EOF.
			if err != io.EOF {
				return err
			}
			break For
		}
	}
	// mark member ways from deleted relations for re-insert
	for id, _ := range deleter.DeletedMemberWays() {
		wayIds[id] = true
	}
	progress.Stop()
	log.StopStep(step)
	step = log.StartStep("Writing added/modified elements")
	progress = stats.NewStatsReporter()
	// Phase 2: expand the dependency closure (nodes -> ways -> relations),
	// then feed everything to the writers.
	// mark depending ways for (re)insert
	for nodeId, _ := range nodeIds {
		dependers := diffCache.Coords.Get(nodeId)
		for _, way := range dependers {
			wayIds[way] = true
		}
	}
	// mark depending relations for (re)insert
	for wayId, _ := range wayIds {
		dependers := diffCache.Ways.Get(wayId)
		// mark depending relations for (re)insert
		for _, rel := range dependers {
			relIds[rel] = true
		}
	}
	for relId, _ := range relIds {
		rel, err := osmCache.Relations.GetRelation(relId)
		if err != nil {
			if err != cache.NotFound {
				log.Print(rel, err)
			}
			continue
		}
		// insert new relation
		progress.AddRelations(1)
		relations <- rel
	}
	for wayId, _ := range wayIds {
		way, err := osmCache.Ways.GetWay(wayId)
		if err != nil {
			if err != cache.NotFound {
				log.Print(way, err)
			}
			continue
		}
		// insert new way
		progress.AddWays(1)
		ways <- way
	}
	for nodeId, _ := range nodeIds {
		node, err := osmCache.Nodes.GetNode(nodeId)
		if err != nil {
			if err != cache.NotFound {
				log.Print(node, err)
			}
			// missing nodes can still be Coords
			// no `continue` here
		}
		if node != nil {
			// insert new node
			progress.AddNodes(1)
			nodes <- node
		}
	}
	// Closing the channels lets the writers drain and finish.
	close(relations)
	close(ways)
	close(nodes)
	nodeWriter.Wait()
	relWriter.Wait()
	wayWriter.Wait()
	if genDb != nil {
		genDb.GeneralizeUpdates()
	}
	err = db.End()
	if err != nil {
		return err
	}
	err = db.Close()
	if err != nil {
		return err
	}
	log.StopStep(step)
	progress.Stop()
	// Record the new state so the next run can skip this change file.
	if state != nil {
		if lastState != nil {
			state.Url = lastState.Url
		}
		err = diffstate.WriteLastState(config.BaseOptions.DiffDir, state)
		if err != nil {
			log.Warn(err) // warn only
		}
	}
	return nil
}