Example #1
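// Query implements the query-cache command: it looks up the given relation,
// way, and node IDs in the OSM and diff caches and prints the result as JSON.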
func Query(args []string) {
	flags.Usage = Usage

	if len(args) == 0 {
		Usage()
	}

	err := flags.Parse(args)
	if err != nil {
		log.Fatal(err)
	}
	log.SetFlags(0)
	log.SetOutput(os.Stdout)

	osmCache := cache.NewOSMCache(*cachedir)
	err = osmCache.Open()
	if err != nil {
		log.Fatal(err)
	}
	diffCache := cache.NewDiffCache(*cachedir)
	err = diffCache.Open()
	if err != nil {
		log.Fatal(err)
	}

	if *full && *deps {
		log.Fatal("cannot use -full and -deps option together")
	}

	result := result{}

	if *relIds != "" {
		ids := splitIds(*relIds)
		result.Relations = collectRelations(osmCache, ids, *full)
	}

	if *wayIds != "" {
		ids := splitIds(*wayIds)
		result.Ways = collectWays(osmCache, diffCache, ids, *full, *deps)
	}

	if *nodeIds != "" {
		ids := splitIds(*nodeIds)
		result.Nodes = collectNodes(osmCache, diffCache, ids, *deps)
	}

	printJson(result)
}
Example #2
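// Import runs the import pipeline: it optionally reads a PBF file into the OSM
// cache, writes the cached elements to the database, and handles the optimize,
// deploy, revert-deploy, and remove-backup options.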
func Import() {
	if config.BaseOptions.Quiet {
		logging.SetQuiet(true)
	}

	if (config.ImportOptions.Write || config.ImportOptions.Read != "") && (config.ImportOptions.RevertDeploy || config.ImportOptions.RemoveBackup) {
		log.Fatal("-revertdeploy and -removebackup not compatible with -read/-write")
	}

	if config.ImportOptions.RevertDeploy && (config.ImportOptions.RemoveBackup || config.ImportOptions.DeployProduction) {
		log.Fatal("-revertdeploy not compatible with -deployproduction/-removebackup")
	}

	var geometryLimiter *limit.Limiter
	if (config.ImportOptions.Write || config.ImportOptions.Read != "") && config.BaseOptions.LimitTo != "" {
		var err error
		step := log.StartStep("Reading limitto geometries")
		geometryLimiter, err = limit.NewFromGeoJsonWithBuffered(
			config.BaseOptions.LimitTo,
			config.BaseOptions.LimitToCacheBuffer,
		)
		if err != nil {
			log.Fatal(err)
		}
		log.StopStep(step)
	}

	tagmapping, err := mapping.NewMapping(config.BaseOptions.MappingFile)
	if err != nil {
		log.Fatal("mapping file: ", err)
	}

	var db database.DB

	if config.ImportOptions.Write || config.ImportOptions.DeployProduction || config.ImportOptions.RevertDeploy || config.ImportOptions.RemoveBackup || config.ImportOptions.Optimize {
		if config.BaseOptions.Connection == "" {
			log.Fatal("missing connection option")
		}
		conf := database.Config{
			ConnectionParams: config.BaseOptions.Connection,
			Srid:             config.BaseOptions.Srid,
			ImportSchema:     config.BaseOptions.Schemas.Import,
			ProductionSchema: config.BaseOptions.Schemas.Production,
			BackupSchema:     config.BaseOptions.Schemas.Backup,
		}
		db, err = database.Open(conf, tagmapping)
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()
	}

	osmCache := cache.NewOSMCache(config.BaseOptions.CacheDir)

	if config.ImportOptions.Read != "" && osmCache.Exists() {
		if config.ImportOptions.Overwritecache {
			log.Printf("removing existing cache %s", config.BaseOptions.CacheDir)
			err := osmCache.Remove()
			if err != nil {
				log.Fatal("unable to remove cache:", err)
			}
		} else if !config.ImportOptions.Appendcache {
			log.Fatal("cache already exists use -appendcache or -overwritecache")
		}
	}

	step := log.StartStep("Imposm")

	var elementCounts *stats.ElementCounts

	if config.ImportOptions.Read != "" {
		step := log.StartStep("Reading OSM data")
		err = osmCache.Open()
		if err != nil {
			log.Fatal(err)
		}
		progress := stats.NewStatsReporter()

		pbfFile, err := pbf.Open(config.ImportOptions.Read)
		if err != nil {
			log.Fatal(err)
		}

		osmCache.Coords.SetLinearImport(true)
		readLimiter := geometryLimiter
		if config.BaseOptions.LimitToCacheBuffer == 0.0 {
			readLimiter = nil
		}
		reader.ReadPbf(osmCache, progress, tagmapping,
			pbfFile, readLimiter)

		osmCache.Coords.SetLinearImport(false)
		elementCounts = progress.Stop()
		osmCache.Close()
		log.StopStep(step)
		diffstate := state.FromPbf(pbfFile)
		if diffstate != nil {
			os.MkdirAll(config.BaseOptions.DiffDir, 0755)
			err := diffstate.WriteToFile(path.Join(config.BaseOptions.DiffDir, "last.state.txt"))
			if err != nil {
				log.Print("error writing last.state.txt: ", err)
			}
		}
	}

	if config.ImportOptions.Write {
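		// Write phase: import relations, ways, and nodes from the cache into
		// the database, then generalize and finish the import schema.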
		stepImport := log.StartStep("Importing OSM data")
		stepWrite := log.StartStep("Writing OSM data")
		progress := stats.NewStatsReporterWithEstimate(elementCounts)

		err = db.Init()
		if err != nil {
			log.Fatal(err)
		}

		bulkDb, ok := db.(database.BulkBeginner)
		if ok {
			err = bulkDb.BeginBulk()
		} else {
			err = db.Begin()
		}
		if err != nil {
			log.Fatal(err)
		}

		var diffCache *cache.DiffCache
		if config.ImportOptions.Diff {
			diffCache = cache.NewDiffCache(config.BaseOptions.CacheDir)
			if err = diffCache.Remove(); err != nil {
				log.Fatal(err)
			}
			if err = diffCache.Open(); err != nil {
				log.Fatal(err)
			}
		}

		err = osmCache.Open()
		if err != nil {
			log.Fatal(err)
		}
		if diffCache != nil {
			diffCache.Coords.SetLinearImport(true)
			diffCache.Ways.SetLinearImport(true)
		}
		osmCache.Coords.SetReadOnly(true)

		relations := osmCache.Relations.Iter()
		relWriter := writer.NewRelationWriter(osmCache, diffCache, relations,
			db, progress, config.BaseOptions.Srid)
		relWriter.SetLimiter(geometryLimiter)
		relWriter.EnableConcurrent()
		relWriter.Start()
		relWriter.Wait() // blocks till the Relations.Iter() finishes
		osmCache.Relations.Close()

		ways := osmCache.Ways.Iter()
		wayWriter := writer.NewWayWriter(osmCache, diffCache, ways, db,
			progress, config.BaseOptions.Srid)
		wayWriter.SetLimiter(geometryLimiter)
		wayWriter.EnableConcurrent()
		wayWriter.Start()
		wayWriter.Wait() // blocks till the Ways.Iter() finishes
		osmCache.Ways.Close()

		nodes := osmCache.Nodes.Iter()
		nodeWriter := writer.NewNodeWriter(osmCache, nodes, db,
			progress, config.BaseOptions.Srid)
		nodeWriter.SetLimiter(geometryLimiter)
		nodeWriter.EnableConcurrent()
		nodeWriter.Start()
		nodeWriter.Wait() // blocks till the Nodes.Iter() finishes
		osmCache.Close()

		err = db.End()
		if err != nil {
			log.Fatal(err)
		}

		progress.Stop()

		if config.ImportOptions.Diff {
			diffCache.Close()
		}

		log.StopStep(stepWrite)

		if db, ok := db.(database.Generalizer); ok {
			if err := db.Generalize(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not generalizeable")
		}

		if db, ok := db.(database.Finisher); ok {
			if err := db.Finish(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not finishable")
		}
		log.StopStep(stepImport)
	}

	if config.ImportOptions.Optimize {
		if db, ok := db.(database.Optimizer); ok {
			if err := db.Optimize(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not optimizable")
		}
	}

	if config.ImportOptions.DeployProduction {
		if db, ok := db.(database.Deployer); ok {
			if err := db.Deploy(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not deployable")
		}
	}

	if config.ImportOptions.RevertDeploy {
		if db, ok := db.(database.Deployer); ok {
			if err := db.RevertDeploy(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not deployable")
		}
	}

	if config.ImportOptions.RemoveBackup {
		if db, ok := db.(database.Deployer); ok {
			if err := db.RemoveBackup(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not deployable")
		}
	}

	log.StopStep(step)

}
Example #3
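// mainimport runs the import pipeline and additionally sets up optional CPU,
// HTTP, and memory profiling from the command-line options.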
func mainimport() {
	if config.ImportOptions.Cpuprofile != "" {
		f, err := os.Create(config.ImportOptions.Cpuprofile)
		if err != nil {
			golog.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	if config.ImportOptions.Httpprofile != "" {
		stats.StartHttpPProf(config.ImportOptions.Httpprofile)
	}

	if config.ImportOptions.Memprofile != "" {
		parts := strings.Split(config.ImportOptions.Memprofile, string(os.PathListSeparator))
		var interval time.Duration

		if len(parts) < 2 {
			interval, _ = time.ParseDuration("1m")
		} else {
			var err error
			interval, err = time.ParseDuration(parts[1])
			if err != nil {
				golog.Fatal(err)
			}
		}

		go stats.MemProfiler(parts[0], interval)
	}

	if config.ImportOptions.Quiet {
		logging.SetQuiet(true)
	}

	if (config.ImportOptions.Write || config.ImportOptions.Read != "") && (config.ImportOptions.RevertDeploy || config.ImportOptions.RemoveBackup) {
		log.Fatal("-revertdeploy and -removebackup not compatible with -read/-write")
	}

	if config.ImportOptions.RevertDeploy && (config.ImportOptions.RemoveBackup || config.ImportOptions.DeployProduction) {
		log.Fatal("-revertdeploy not compatible with -deployproduction/-removebackup")
	}

	var geometryLimiter *limit.Limiter
	if config.ImportOptions.Write && config.BaseOptions.LimitTo != "" {
		var err error
		step := log.StartStep("Reading limitto geometries")
		geometryLimiter, err = limit.NewFromOgrSource(config.BaseOptions.LimitTo)
		if err != nil {
			log.Fatal(err)
		}
		log.StopStep(step)
	}

	tagmapping, err := mapping.NewMapping(config.BaseOptions.MappingFile)
	if err != nil {
		log.Fatal("mapping file: ", err)
	}

	var db database.DB

	if config.ImportOptions.Write || config.ImportOptions.DeployProduction || config.ImportOptions.RevertDeploy || config.ImportOptions.RemoveBackup || config.ImportOptions.Optimize {
		if config.BaseOptions.Connection == "" {
			log.Fatal("missing connection option")
		}
		connType := database.ConnectionType(config.BaseOptions.Connection)
		conf := database.Config{
			Type:             connType,
			ConnectionParams: config.BaseOptions.Connection,
			Srid:             config.BaseOptions.Srid,
		}
		db, err = database.Open(conf, tagmapping)
		if err != nil {
			log.Fatal(err)
		}
	}

	osmCache := cache.NewOSMCache(config.BaseOptions.CacheDir)

	if config.ImportOptions.Read != "" && osmCache.Exists() {
		if config.ImportOptions.Overwritecache {
			log.Printf("removing existing cache %s", config.BaseOptions.CacheDir)
			err := osmCache.Remove()
			if err != nil {
				log.Fatal("unable to remove cache:", err)
			}
		} else if !config.ImportOptions.Appendcache {
			log.Fatal("cache already exists use -appendcache or -overwritecache")
		}
	}

	step := log.StartStep("Imposm")

	var elementCounts *stats.ElementCounts

	if config.ImportOptions.Read != "" {
		step := log.StartStep("Reading OSM data")
		err = osmCache.Open()
		if err != nil {
			log.Fatal(err)
		}
		progress := stats.NewStatsReporter()

		pbfFile, err := pbf.Open(config.ImportOptions.Read)
		if err != nil {
			log.Fatal(err)
		}

		osmCache.Coords.SetLinearImport(true)
		reader.ReadPbf(osmCache, progress, tagmapping, pbfFile)
		osmCache.Coords.SetLinearImport(false)
		elementCounts = progress.Stop()
		osmCache.Close()
		log.StopStep(step)
		if config.ImportOptions.Diff {
			diffstate := state.FromPbf(pbfFile)
			if diffstate != nil {
				if err := diffstate.WriteToFile(path.Join(config.BaseOptions.CacheDir, "last.state.txt")); err != nil {
					log.Print("error writing last.state.txt: ", err)
				}
			}
		}
	}

	if config.ImportOptions.Write {
		stepImport := log.StartStep("Importing OSM data")
		stepWrite := log.StartStep("Writing OSM data")
		progress := stats.NewStatsReporterWithEstimate(elementCounts)

		err = db.Init()
		if err != nil {
			log.Fatal(err)
		}

		bulkDb, ok := db.(database.BulkBeginner)
		if ok {
			err = bulkDb.BeginBulk()
		} else {
			err = db.Begin()
		}
		if err != nil {
			log.Fatal(err)
		}

		var diffCache *cache.DiffCache
		if config.ImportOptions.Diff {
			diffCache = cache.NewDiffCache(config.BaseOptions.CacheDir)
			if err = diffCache.Remove(); err != nil {
				log.Fatal(err)
			}
			if err = diffCache.Open(); err != nil {
				log.Fatal(err)
			}
		}

		err = osmCache.Open()
		if err != nil {
			log.Fatal(err)
		}
		if diffCache != nil {
			diffCache.Coords.SetLinearImport(true)
			diffCache.Ways.SetLinearImport(true)
		}
		osmCache.Coords.SetReadOnly(true)
		pointsTagMatcher := tagmapping.PointMatcher()
		lineStringsTagMatcher := tagmapping.LineStringMatcher()
		polygonsTagMatcher := tagmapping.PolygonMatcher()

		relations := osmCache.Relations.Iter()
		relWriter := writer.NewRelationWriter(osmCache, diffCache, relations,
			db, polygonsTagMatcher, progress, config.BaseOptions.Srid)
		relWriter.SetLimiter(geometryLimiter)
		relWriter.Start()

		// blocks till the Relations.Iter() finishes
		relWriter.Close()
		osmCache.Relations.Close()

		ways := osmCache.Ways.Iter()
		wayWriter := writer.NewWayWriter(osmCache, diffCache, ways, db,
			lineStringsTagMatcher, polygonsTagMatcher, progress, config.BaseOptions.Srid)
		wayWriter.SetLimiter(geometryLimiter)
		wayWriter.Start()

		// blocks till the Ways.Iter() finishes
		wayWriter.Close()
		osmCache.Ways.Close()

		nodes := osmCache.Nodes.Iter()
		nodeWriter := writer.NewNodeWriter(osmCache, nodes, db,
			pointsTagMatcher, progress, config.BaseOptions.Srid)
		nodeWriter.SetLimiter(geometryLimiter)
		nodeWriter.Start()

		// blocks till the Nodes.Iter() finishes
		nodeWriter.Close()
		osmCache.Close()

		err = db.End()
		if err != nil {
			log.Fatal(err)
		}

		progress.Stop()

		if config.ImportOptions.Diff {
			diffCache.Close()
		}

		log.StopStep(stepWrite)

		if db, ok := db.(database.Generalizer); ok {
			if err := db.Generalize(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not generalizeable")
		}

		if db, ok := db.(database.Finisher); ok {
			if err := db.Finish(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not finishable")
		}
		log.StopStep(stepImport)
	}

	if config.ImportOptions.Optimize {
		if db, ok := db.(database.Optimizer); ok {
			if err := db.Optimize(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not optimizable")
		}
	}

	if config.ImportOptions.DeployProduction {
		if db, ok := db.(database.Deployer); ok {
			if err := db.Deploy(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not deployable")
		}
	}

	if config.ImportOptions.RevertDeploy {
		if db, ok := db.(database.Deployer); ok {
			if err := db.RevertDeploy(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not deployable")
		}
	}

	if config.ImportOptions.RemoveBackup {
		if db, ok := db.(database.Deployer); ok {
			if err := db.RemoveBackup(); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal("database not deployable")
		}
	}

	log.StopStep(step)

}
Example #4
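// Update applies an OSM change file (.osc) to the caches and the database:
// deleted elements are removed, added and modified elements are written along
// with their dependent ways and relations, and expired tiles are recorded.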
func Update(oscFile string, geometryLimiter *limit.Limiter, force bool) {
	state, err := diffstate.ParseFromOsc(oscFile)
	if err != nil {
		log.Fatal(err)
	}
	lastState, err := diffstate.ParseLastState(config.BaseOptions.CacheDir)
	if err != nil {
		log.Fatal(err)
	}

	if lastState != nil && lastState.Sequence != 0 && state != nil && state.Sequence <= lastState.Sequence {
		if !force {
			log.Warn(state, " already imported")
			return
		}
	}

	defer log.StopStep(log.StartStep(fmt.Sprintf("Processing %s", oscFile)))

	elems, errc := parser.Parse(oscFile)

	osmCache := cache.NewOSMCache(config.BaseOptions.CacheDir)
	err = osmCache.Open()
	if err != nil {
		log.Fatal("osm cache: ", err)
	}

	diffCache := cache.NewDiffCache(config.BaseOptions.CacheDir)
	err = diffCache.Open()
	if err != nil {
		log.Fatal("diff cache: ", err)
	}

	tagmapping, err := mapping.NewMapping(config.BaseOptions.MappingFile)
	if err != nil {
		log.Fatal(err)
	}

	connType := database.ConnectionType(config.BaseOptions.Connection)
	dbConf := database.Config{
		Type:             connType,
		ConnectionParams: config.BaseOptions.Connection,
		Srid:             config.BaseOptions.Srid,
	}
	db, err := database.Open(dbConf, tagmapping)
	if err != nil {
		log.Fatal("database open: ", err)
	}

	err = db.Begin()
	if err != nil {
		log.Fatal(err)
	}

	delDb, ok := db.(database.Deleter)
	if !ok {
		log.Fatal("database not deletable")
	}
	deleter := NewDeleter(
		delDb,
		osmCache,
		diffCache,
		tagmapping.PointMatcher(),
		tagmapping.LineStringMatcher(),
		tagmapping.PolygonMatcher(),
	)

	progress := stats.NewStatsReporter()

	expiredTiles := expire.NewTiles(14)

	relTagFilter := tagmapping.RelationTagFilter()
	wayTagFilter := tagmapping.WayTagFilter()
	nodeTagFilter := tagmapping.NodeTagFilter()

	pointsTagMatcher := tagmapping.PointMatcher()
	lineStringsTagMatcher := tagmapping.LineStringMatcher()
	polygonsTagMatcher := tagmapping.PolygonMatcher()

	relations := make(chan *element.Relation)
	ways := make(chan *element.Way)
	nodes := make(chan *element.Node)

	relWriter := writer.NewRelationWriter(osmCache, diffCache, relations,
		db, polygonsTagMatcher, progress, config.BaseOptions.Srid)
	relWriter.SetLimiter(geometryLimiter)
	relWriter.SetExpireTiles(expiredTiles)
	relWriter.Start()

	wayWriter := writer.NewWayWriter(osmCache, diffCache, ways, db,
		lineStringsTagMatcher, polygonsTagMatcher, progress, config.BaseOptions.Srid)
	wayWriter.SetLimiter(geometryLimiter)
	wayWriter.SetExpireTiles(expiredTiles)
	wayWriter.Start()

	nodeWriter := writer.NewNodeWriter(osmCache, nodes, db,
		pointsTagMatcher, progress, config.BaseOptions.Srid)
	nodeWriter.SetLimiter(geometryLimiter)
	nodeWriter.Start()

	nodeIds := make(map[int64]bool)
	wayIds := make(map[int64]bool)
	relIds := make(map[int64]bool)

	step := log.StartStep("Parsing changes, updating cache and removing elements")

	g := geos.NewGeos()
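	// Stream the change file: apply deletions immediately, cache additions, and
	// remember their IDs so they can be written once parsing has finished.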
For:
	for {
		select {
		case elem := <-elems:
			if elem.Rel != nil {
				relTagFilter.Filter(&elem.Rel.Tags)
				progress.AddRelations(1)
			} else if elem.Way != nil {
				wayTagFilter.Filter(&elem.Way.Tags)
				progress.AddWays(1)
			} else if elem.Node != nil {
				nodeTagFilter.Filter(&elem.Node.Tags)
				if len(elem.Node.Tags) > 0 {
					progress.AddNodes(1)
				}
				progress.AddCoords(1)
			}
			if elem.Del {
				deleter.Delete(elem)
				if !elem.Add {
					if elem.Rel != nil {
						if err := osmCache.Relations.DeleteRelation(elem.Rel.Id); err != nil {
							log.Fatal(err)
						}
					} else if elem.Way != nil {
						if err := osmCache.Ways.DeleteWay(elem.Way.Id); err != nil {
							log.Fatal(err)
						}
						diffCache.Ways.Delete(elem.Way.Id)
					} else if elem.Node != nil {
						if err := osmCache.Nodes.DeleteNode(elem.Node.Id); err != nil {
							log.Fatal(err)
						}
						if err := osmCache.Coords.DeleteCoord(elem.Node.Id); err != nil {
							log.Fatal(err)
						}
					}
				}
			}
			if elem.Add {
				if elem.Rel != nil {
					// check if first member is cached to avoid caching
					// unneeded relations (typical outside of our coverage)
					if memberIsCached(elem.Rel.Members, osmCache.Ways) {
						osmCache.Relations.PutRelation(elem.Rel)
						relIds[elem.Rel.Id] = true
					}
				} else if elem.Way != nil {
					// check if first coord is cached to avoid caching
					// unneeded ways (typical outside of our coverage)
					if coordIsCached(elem.Way.Refs, osmCache.Coords) {
						osmCache.Ways.PutWay(elem.Way)
						wayIds[elem.Way.Id] = true
					}
				} else if elem.Node != nil {
					if geometryLimiter == nil || geometryLimiter.IntersectsBuffer(g, elem.Node.Long, elem.Node.Lat) {
						osmCache.Nodes.PutNode(elem.Node)
						osmCache.Coords.PutCoords([]element.Node{*elem.Node})
						nodeIds[elem.Node.Id] = true
					}
				}
			}
		case err := <-errc:
			if err != io.EOF {
				log.Fatal(err)
			}
			break For
		}
	}
	progress.Stop()
	log.StopStep(step)
	step = log.StartStep("Writing added/modified elements")

	progress = stats.NewStatsReporter()

	for nodeId := range nodeIds {
		node, err := osmCache.Nodes.GetNode(nodeId)
		if err != nil {
			if err != cache.NotFound {
				log.Print(node, err)
			}
			// missing nodes can still be Coords
			// no `continue` here
		}
		if node != nil {
			// insert new node
			nodes <- node
		}
		dependers := diffCache.Coords.Get(nodeId)
		// mark depending ways for (re)insert
		for _, way := range dependers {
			wayIds[way] = true
		}
	}

	for wayId := range wayIds {
		way, err := osmCache.Ways.GetWay(wayId)
		if err != nil {
			if err != cache.NotFound {
				log.Print(way, err)
			}
			continue
		}
		// insert new way
		ways <- way
		dependers := diffCache.Ways.Get(wayId)
		// mark depending relations for (re)insert
		for _, rel := range dependers {
			relIds[rel] = true
		}
	}

	for relId := range relIds {
		rel, err := osmCache.Relations.GetRelation(relId)
		if err != nil {
			if err != cache.NotFound {
				log.Print(rel, err)
			}
			continue
		}
		// insert new relation
		relations <- rel
	}

	close(relations)
	close(ways)
	close(nodes)

	nodeWriter.Close()
	relWriter.Close()
	wayWriter.Close()

	err = db.End()
	if err != nil {
		log.Fatal(err)
	}
	err = db.Close()
	if err != nil {
		log.Fatal(err)
	}

	osmCache.Close()
	diffCache.Close()
	log.StopStep(step)

	step = log.StartStep("Updating expired tiles db")
	expire.WriteTileExpireDb(
		expiredTiles.SortedTiles(),
		"/tmp/expire_tiles.db",
	)
	log.StopStep(step)
	progress.Stop()

	if state != nil {
		err = diffstate.WriteLastState(config.BaseOptions.CacheDir, state)
		if err != nil {
			log.Warn(err) // warn only
		}
	}
}
Example #5
File: main.go  Project: Kotaimen/imposm3
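// Main dispatches to the import, diff, query-cache, and version subcommands
// based on the first command-line argument.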
func Main(usage func()) {
	golog.SetFlags(golog.LstdFlags | golog.Lshortfile)
	if os.Getenv("GOMAXPROCS") == "" {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	if len(os.Args) <= 1 {
		usage()
		logging.Shutdown()
		os.Exit(1)
	}

	switch os.Args[1] {
	case "import":
		config.ParseImport(os.Args[2:])
		if config.BaseOptions.Httpprofile != "" {
			stats.StartHttpPProf(config.BaseOptions.Httpprofile)
		}
		import_.Import()
	case "diff":
		config.ParseDiffImport(os.Args[2:])

		if config.BaseOptions.Httpprofile != "" {
			stats.StartHttpPProf(config.BaseOptions.Httpprofile)
		}

		if config.BaseOptions.Quiet {
			logging.SetQuiet(true)
		}

		var geometryLimiter *limit.Limiter
		if config.BaseOptions.LimitTo != "" {
			var err error
			step := log.StartStep("Reading limitto geometries")
			geometryLimiter, err = limit.NewFromGeoJsonWithBuffered(
				config.BaseOptions.LimitTo,
				config.BaseOptions.LimitToCacheBuffer,
			)
			if err != nil {
				log.Fatal(err)
			}
			log.StopStep(step)
		}
		osmCache := cache.NewOSMCache(config.BaseOptions.CacheDir)
		err := osmCache.Open()
		if err != nil {
			log.Fatal("osm cache: ", err)
		}
		defer osmCache.Close()

		diffCache := cache.NewDiffCache(config.BaseOptions.CacheDir)
		err = diffCache.Open()
		if err != nil {
			log.Fatal("diff cache: ", err)
		}

		for _, oscFile := range config.DiffFlags.Args() {
			err := diff.Update(oscFile, geometryLimiter, nil, osmCache, diffCache, false)
			if err != nil {
				osmCache.Close()
				diffCache.Close()
				log.Fatal(err)
			}
		}
		// explicitly Close since os.Exit prevents defers
		osmCache.Close()
		diffCache.Close()

	case "query-cache":
		query.Query(os.Args[2:])
	case "version":
		fmt.Println(Version)
		os.Exit(0)
	default:
		usage()
		log.Fatalf("invalid command: '%s'", os.Args[1])
	}
	logging.Shutdown()
	os.Exit(0)

}