Example #1
// CreateIntentForCollection builds an intent for the given database and collection name
// along with a path to a .bson collection file. It searches the file's parent directory
// for a matching metadata file.
//
// This method is not called by CreateIntentsForDB,
// it is only used in the case where --db and --collection flags are set.
func (restore *MongoRestore) CreateIntentForCollection(db string, collection string, dir archive.DirLike) error {
	log.Logvf(log.DebugLow, "reading collection %v for database %v from %v",
		collection, db, dir.Path())
	// first make sure the bson file exists and is valid
	_, err := dir.Stat()
	if err != nil {
		return err
	}
	if dir.IsDir() {
		return fmt.Errorf("file %v is a directory, not a bson file", dir.Path())
	}

	baseName, fileType := restore.getInfoFromFilename(dir.Name())
	if fileType != BSONFileType {
		return fmt.Errorf("file %v does not have .bson extension", dir.Path())
	}

	// then create its intent
	intent := &intents.Intent{
		DB:       db,
		C:        collection,
		Size:     dir.Size(),
		Location: dir.Path(),
	}
	intent.BSONFile = &realBSONFile{path: dir.Path(), intent: intent, gzip: restore.InputOptions.Gzip}

	// finally, check if it has a .metadata.json file in its folder
	log.Logvf(log.DebugLow, "scanning directory %v for metadata", dir.Name())
	entries, err := dir.Parent().ReadDir()
	if err != nil {
		// try and carry on if we can
		log.Logvf(log.Info, "error attempting to locate metadata for file: %v", err)
		log.Logv(log.Info, "restoring collection without metadata")
		restore.manager.Put(intent)
		return nil
	}
	metadataName := baseName + ".metadata.json"
	if restore.InputOptions.Gzip {
		metadataName += ".gz"
	}
	for _, entry := range entries {
		if entry.Name() == metadataName {
			metadataPath := entry.Path()
			log.Logvf(log.Info, "found metadata for collection at %v", metadataPath)
			intent.MetadataLocation = metadataPath
			intent.MetadataFile = &realMetadataFile{path: metadataPath, intent: intent, gzip: restore.InputOptions.Gzip}
			break
		}
	}

	if intent.MetadataFile == nil {
		log.Logv(log.Info, "restoring collection without metadata")
	}

	restore.manager.Put(intent)

	return nil
}
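A worked illustration of the metadata lookup above, using hypothetical paths: for a dump file dump/db1/people.bson, getInfoFromFilename yields the base name "people", so the loop scans dump/db1 for people.metadata.json (or people.metadata.json.gz when --gzip is in effect); if neither file is present, the intent is still queued, just without metadata.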
Example #2
// Prompt displays a prompt asking for the password and returns the
// password the user enters as a string.
func Prompt() string {
	var pass string
	if IsTerminal() {
		log.Logv(log.DebugLow, "standard input is a terminal; reading password from terminal")
		fmt.Fprintf(os.Stderr, "Enter password:"******"reading password from standard input")
		fmt.Fprintf(os.Stderr, "Enter password:")
		pass = readPassFromStdin()
	}
	fmt.Fprintln(os.Stderr)
	return pass
}
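A hypothetical call site for Prompt, sketching how a caller might only prompt when a username was given without a password; the opts fields and the password package name are illustrative assumptions, not taken from the tools' code:

	// sketch only: "password" is assumed to be the package exposing Prompt,
	// and the option fields are illustrative
	if opts.Auth.Username != "" && opts.Auth.Password == "" {
		opts.Auth.Password = password.Prompt()
	}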
Example #3
// ValidateSettings returns an error if any settings specified on the command line
// were invalid, or nil if they are valid.
func (exp *MongoExport) ValidateSettings() error {
	// Namespace must have a valid database; if none is specified, use 'test'
	if exp.ToolOptions.Namespace.DB == "" {
		exp.ToolOptions.Namespace.DB = "test"
	}
	err := util.ValidateDBName(exp.ToolOptions.Namespace.DB)
	if err != nil {
		return err
	}

	if exp.ToolOptions.Namespace.Collection == "" {
		return fmt.Errorf("must specify a collection")
	}
	if err = util.ValidateCollectionGrammar(exp.ToolOptions.Namespace.Collection); err != nil {
		return err
	}

	exp.OutputOpts.Type = strings.ToLower(exp.OutputOpts.Type)

	if exp.OutputOpts.CSVOutputType {
		log.Logv(log.Always, "csv flag is deprecated; please use --type=csv instead")
		exp.OutputOpts.Type = CSV
	}

	if exp.OutputOpts.Type == "" {
		// special error for an empty type value
		return fmt.Errorf("--type cannot be empty")
	}
	if exp.OutputOpts.Type != CSV && exp.OutputOpts.Type != JSON {
		return fmt.Errorf("invalid output type '%v', choose 'json' or 'csv'", exp.OutputOpts.Type)
	}

	if exp.InputOpts.Query != "" && exp.InputOpts.ForceTableScan {
		return fmt.Errorf("cannot use --forceTableScan when specifying --query")
	}

	if exp.InputOpts.Query != "" && exp.InputOpts.QueryFile != "" {
		return fmt.Errorf("either --query or --queryFile can be specified as a query option")
	}

	if exp.InputOpts != nil && exp.InputOpts.HasQuery() {
		content, err := exp.InputOpts.GetQuery()
		if err != nil {
			return err
		}
		_, err2 := getObjectFromByteArg(content)
		if err2 != nil {
			return err2
		}
	}

	if exp.InputOpts != nil && exp.InputOpts.Sort != "" {
		_, err := getSortFromArg(exp.InputOpts.Sort)
		if err != nil {
			return err
		}
	}
	return nil
}
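As a worked example of these checks: mongoexport --collection people --type=csv passes validation (the database simply defaults to "test"), while an invocation with no --collection, with an empty --type, with --query plus --forceTableScan, or with both --query and --queryFile is rejected by the corresponding branch above.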
Example #4
// getTargetDirFromArgs handles the logic and error cases of figuring out
// the target restore directory.
func getTargetDirFromArgs(extraArgs []string, dirFlag string) (string, error) {
	// This logic is in a switch statement so that the rules are understandable.
	// We start by handling error cases, and then handle the different ways the target
	// directory can be legally set.
	switch {
	case len(extraArgs) > 1:
		// error on cases when there are too many positional arguments
		return "", fmt.Errorf("too many positional arguments")

	case dirFlag != "" && len(extraArgs) > 0:
		// error when positional arguments and --dir are used
		return "", fmt.Errorf(
			"cannot use both --dir and a positional argument to set the target directory")

	case len(extraArgs) == 1:
		// a nice, simple case where one argument is given, so we use it
		return extraArgs[0], nil

	case dirFlag != "":
		// if we have no extra args and a --dir flag, use the --dir flag
		log.Logv(log.Info, "using --dir flag instead of arguments")
		return dirFlag, nil

	default:
		return "", nil
	}
}
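Because the precedence rules above are pure logic, a small table test makes them concrete. This is a sketch using the standard testing package and assumes it lives in the same package as getTargetDirFromArgs; the expected values follow directly from the switch cases:

func TestGetTargetDirFromArgs(t *testing.T) {
	cases := []struct {
		extraArgs []string
		dirFlag   string
		want      string
		wantErr   bool
	}{
		{[]string{"dump"}, "", "dump", false}, // one positional argument is used directly
		{nil, "dump", "dump", false},          // --dir is used when no positional args are given
		{[]string{"a"}, "dump", "", true},     // both --dir and a positional argument: error
		{[]string{"a", "b"}, "", "", true},    // too many positional arguments: error
		{nil, "", "", false},                  // nothing given: empty target, no error
	}
	for _, c := range cases {
		got, err := getTargetDirFromArgs(c.extraArgs, c.dirFlag)
		if got != c.want || (err != nil) != c.wantErr {
			t.Errorf("getTargetDirFromArgs(%v, %q) = (%q, %v)", c.extraArgs, c.dirFlag, got, err)
		}
	}
}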
Example #5
func handleSignals(finalizer func(), finishedChan chan struct{}) {
	// explicitly ignore SIGPIPE; the tools should deal with write errors
	noopChan := make(chan os.Signal)
	signal.Notify(noopChan, syscall.SIGPIPE)

	log.Logv(log.DebugLow, "will listen for SIGTERM, SIGINT, and SIGKILL")
	sigChan := make(chan os.Signal, 2)
	signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT, syscall.SIGKILL)
	defer signal.Stop(sigChan)
	if finalizer != nil {
		select {
		case sig := <-sigChan:
			// first signal use finalizer to terminate cleanly
			log.Logvf(log.Always, "signal '%s' received; attempting to shut down", sig)
			finalizer()
		case <-finishedChan:
			return
		}
	}
	select {
	case sig := <-sigChan:
		// second signal exits immediately
		log.Logvf(log.Always, "signal '%s' received; forcefully terminating", sig)
		os.Exit(util.ExitKill)
	case <-finishedChan:
		return
	}
}
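A hypothetical sketch of how a tool could wire handleSignals up: the finalizer requests a clean shutdown, and closing finishedChan lets the handler goroutine exit once the work is done. The runWithSignalHandling and stopChan names are illustrative, not part of the tools code:

func runWithSignalHandling(doWork func(stop <-chan struct{}) error) error {
	stopChan := make(chan struct{})     // closed by the finalizer on the first signal
	finishedChan := make(chan struct{}) // closed when the work completes normally
	defer close(finishedChan)

	go handleSignals(func() {
		close(stopChan) // ask the work to wind down cleanly
	}, finishedChan)

	return doWork(stopChan)
}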
Example #6
// Finalize processes the intents for prioritization. Currently only two
// kinds of prioritizers are supported. No more "Put" operations may be done
// after finalize is called.
func (manager *Manager) Finalize(pType PriorityType) {
	switch pType {
	case Legacy:
		log.Logv(log.DebugHigh, "finalizing intent manager with legacy prioritizer")
		manager.prioritizer = NewLegacyPrioritizer(manager.intentsByDiscoveryOrder)
	case LongestTaskFirst:
		log.Logv(log.DebugHigh, "finalizing intent manager with longest task first prioritizer")
		manager.prioritizer = NewLongestTaskFirstPrioritizer(manager.intentsByDiscoveryOrder)
	case MultiDatabaseLTF:
		log.Logv(log.DebugHigh, "finalizing intent manager with multi-database longest task first prioritizer")
		manager.prioritizer = NewMultiDatabaseLTFPrioritizer(manager.intentsByDiscoveryOrder)
	default:
		panic("cannot initialize IntentPrioritizer with unknown type")
	}
	// release these for the garbage collector and to ensure code correctness
	manager.intents = nil
	manager.intentsByDiscoveryOrder = nil
}
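A usage sketch of the lifecycle this implies: intents are registered with Put, Finalize selects a prioritizer, and the intents are then drained in priority order. The Pop call and its nil-when-drained behaviour are assumptions about the manager's API, and firstIntent/secondIntent stand in for *intents.Intent values built elsewhere:

	manager := intents.NewIntentManager()
	manager.Put(firstIntent)
	manager.Put(secondIntent)
	manager.Finalize(intents.LongestTaskFirst) // no further Put calls after this point
	for intent := manager.Pop(); intent != nil; intent = manager.Pop() {
		// restore this intent, then mark it finished
	}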
Example #7
// GetDumpAuthVersion reads the admin.system.version collection in the dump directory
// to determine the authentication version of the files in the dump. If that collection is not
// present in the dump, we try to infer the authentication version based on its absence.
// Returns the authentication version number and any errors that occur.
func (restore *MongoRestore) GetDumpAuthVersion() (int, error) {
	// first handle the case where we have no auth version
	intent := restore.manager.AuthVersion()
	if intent == nil {
		if restore.InputOptions.RestoreDBUsersAndRoles {
			// If we are using --restoreDbUsersAndRoles, we cannot guarantee an
			// $admin.system.version collection from a 2.6 server,
			// so we can assume up to version 3.
			log.Logvf(log.Always, "no system.version bson file found in '%v' database dump", restore.NSOptions.DB)
			log.Logv(log.Always, "warning: assuming users and roles collections are of auth version 3")
			log.Logv(log.Always, "if users are from an earlier version of MongoDB, they may not restore properly")
			return 3, nil
		}
		log.Logv(log.Info, "no system.version bson file found in dump")
		log.Logv(log.Always, "assuming users in the dump directory are from <= 2.4 (auth version 1)")
		return 1, nil
	}

	err := intent.BSONFile.Open()
	if err != nil {
		return 0, err
	}
	defer intent.BSONFile.Close()
	bsonSource := db.NewDecodedBSONSource(db.NewBSONSource(intent.BSONFile))
	defer bsonSource.Close()

	versionDoc := bson.M{}
	for bsonSource.Next(&versionDoc) {
		id, ok := versionDoc["_id"].(string)
		if ok && id == "authSchema" {
			authVersion, ok := versionDoc["currentVersion"].(int)
			if ok {
				return authVersion, nil
			}
			return 0, fmt.Errorf("can't unmarshal system.version curentVersion as an int")
		}
		log.Logvf(log.DebugLow, "system.version document is not an authSchema %v", versionDoc["_id"])
	}
	err = bsonSource.Err()
	if err != nil {
		log.Logvf(log.Info, "can't unmarshal system.version document: %v", err)
	}
	return 0, fmt.Errorf("system.version bson file does not have authSchema document")
}
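For reference, this is roughly the shape of the admin.system.version document the loop above looks for, written with the same bson types used in these examples; the version value 5 is illustrative:

	versionDoc := bson.M{
		"_id":            "authSchema",
		"currentVersion": 5,
	}
	if id, ok := versionDoc["_id"].(string); ok && id == "authSchema" {
		if v, ok := versionDoc["currentVersion"].(int); ok {
			fmt.Println("dump auth version:", v) // prints: dump auth version: 5
		}
	}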
Example #8
// CreateIndexes takes in an intent and an array of index documents and
// attempts to create them using the createIndexes command. If that command
// fails, we fall back to individual index creation.
func (restore *MongoRestore) CreateIndexes(intent *intents.Intent, indexes []IndexDocument) error {
	// first, sanitize the indexes
	for _, index := range indexes {
		// update the namespace of the index before inserting
		index.Options["ns"] = intent.Namespace()

		// check for length violations before building the command
		fullIndexName := fmt.Sprintf("%v.$%v", index.Options["ns"], index.Options["name"])
		if len(fullIndexName) > 127 {
			return fmt.Errorf(
				"cannot restore index with namespace '%v': "+
					"namespace is too long (max size is 127 bytes)", fullIndexName)
		}

		// remove the index version, forcing an update,
		// unless we specifically want to keep it
		if !restore.OutputOptions.KeepIndexVersion {
			delete(index.Options, "v")
		}
	}

	session, err := restore.SessionProvider.GetSession()
	if err != nil {
		return fmt.Errorf("error establishing connection: %v", err)
	}
	session.SetSafe(&mgo.Safe{})
	defer session.Close()

	// then attempt the createIndexes command
	rawCommand := bson.D{
		{"createIndexes", intent.C},
		{"indexes", indexes},
	}
	results := bson.M{}
	err = session.DB(intent.DB).Run(rawCommand, &results)
	if err == nil {
		return nil
	}
	if err.Error() != "no such cmd: createIndexes" {
		return fmt.Errorf("createIndex error: %v", err)
	}

	// if we're here, the connected server does not support the command, so we fall back
	log.Logv(log.Info, "\tcreateIndexes command not supported, attemping legacy index insertion")
	for _, idx := range indexes {
		log.Logvf(log.Info, "\tmanually creating index %v", idx.Options["name"])
		err = restore.LegacyInsertIndex(intent, idx)
		if err != nil {
			return fmt.Errorf("error creating index %v: %v", idx.Options["name"], err)
		}
	}
	return nil
}
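An illustrative sketch of the input CreateIndexes expects. The Options map is exactly what the loop above reads ("ns", "name", "v"); the Key field holding the key spec is an assumption about IndexDocument's layout rather than something shown in this code:

	indexes := []IndexDocument{
		{
			Key:     bson.D{{"age", 1}},                                    // assumed field name for the key spec
			Options: bson.M{"name": "age_1", "ns": "shop.people", "v": 1},
		},
	}
	// CreateIndexes(intent, indexes) then rewrites "ns" to the intent's namespace, enforces
	// the 127-byte limit on db.collection.$name, and drops "v" unless KeepIndexVersion is set.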
Example #9
// ValidateAuthVersions compares the authentication version of the dump files and the
// authentication version of the target server, and returns an error if the versions
// are incompatible.
func (restore *MongoRestore) ValidateAuthVersions() error {
	if restore.authVersions.Dump == 2 || restore.authVersions.Dump == 4 {
		return fmt.Errorf(
			"cannot restore users and roles from a dump file with auth version %v; "+
				"finish the upgrade or roll it back", restore.authVersions.Dump)
	}
	if restore.authVersions.Server == 2 || restore.authVersions.Server == 4 {
		return fmt.Errorf(
			"cannot restore users and roles to a server with auth version %v; "+
				"finish the upgrade or roll it back", restore.authVersions.Server)
	}
	switch restore.authVersions {
	case authVersionPair{3, 5}:
		log.Logv(log.Info,
			"restoring users and roles of auth version 3 to a server of auth version 5")
	case authVersionPair{5, 5}:
		log.Logv(log.Info,
			"restoring users and roles of auth version 5 to a server of auth version 5")
	case authVersionPair{3, 3}:
		log.Logv(log.Info,
			"restoring users and roles of auth version 3 to a server of auth version 3")
	case authVersionPair{1, 1}:
		log.Logv(log.Info,
			"restoring users and roles of auth version 1 to a server of auth version 1")
	case authVersionPair{1, 5}:
		return fmt.Errorf("cannot restore users of auth version 1 to a server of auth version 5")
	case authVersionPair{5, 3}:
		return fmt.Errorf("cannot restore users of auth version 5 to a server of auth version 3")
	case authVersionPair{1, 3}:
		log.Logv(log.Info,
			"restoring users and roles of auth version 1 to a server of auth version 3")
		log.Logv(log.Always,
			"users and roles will have to be updated with the authSchemaUpgrade command")
	case authVersionPair{5, 1}:
		fallthrough
	case authVersionPair{3, 1}:
		return fmt.Errorf(
			"cannot restore users and roles dump file >= auth version 3 to a server of auth version 1")
	default:
		return fmt.Errorf("invalid auth pair: dump=%v, server=%v",
			restore.authVersions.Dump, restore.authVersions.Server)
	}
	return nil

}
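Since the switch above compares whole pairs by value, authVersionPair is evidently a small comparable struct; a sketch of what its declaration presumably looks like (the real definition is not shown in these examples):

	type authVersionPair struct {
		Dump   int // auth version of the users/roles in the dump files
		Server int // auth version of the target server
	}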
Example #10
// ParseAndValidateOptions returns a non-nil error if user-supplied options are invalid.
func (restore *MongoRestore) ParseAndValidateOptions() error {
	// Can't use option pkg defaults for --objcheck because it's two separate flags,
	// and we need to be able to see if they're both being used.
	log.Logv(log.DebugHigh, "checking options")
	if restore.InputOptions.Objcheck {
		restore.objCheck = true
		log.Logv(log.DebugHigh, "\tdumping with object check enabled")
	} else {
		log.Logv(log.DebugHigh, "\tdumping with object check disabled")
	}

	if restore.NSOptions.DB == "" && restore.NSOptions.Collection != "" {
		return fmt.Errorf("cannot restore a collection without a specified database")
	}

	if restore.NSOptions.DB != "" {
		if err := util.ValidateDBName(restore.NSOptions.DB); err != nil {
			return fmt.Errorf("invalid db name: %v", err)
		}
	}
	if restore.NSOptions.Collection != "" {
		if err := util.ValidateCollectionGrammar(restore.NSOptions.Collection); err != nil {
			return fmt.Errorf("invalid collection name: %v", err)
		}
	}
	if restore.InputOptions.RestoreDBUsersAndRoles && restore.NSOptions.DB == "" {
		return fmt.Errorf("cannot use --restoreDbUsersAndRoles without a specified database")
	}
	if restore.InputOptions.RestoreDBUsersAndRoles && restore.NSOptions.DB == "admin" {
		return fmt.Errorf("cannot use --restoreDbUsersAndRoles with the admin database")
	}

	var err error
	restore.isMongos, err = restore.SessionProvider.IsMongos()
	if err != nil {
		return err
	}
	if restore.isMongos {
		log.Logv(log.DebugLow, "restoring to a sharded system")
	}

	if restore.InputOptions.OplogLimit != "" {
		if !restore.InputOptions.OplogReplay {
			return fmt.Errorf("cannot use --oplogLimit without --oplogReplay enabled")
		}
		restore.oplogLimit, err = ParseTimestampFlag(restore.InputOptions.OplogLimit)
		if err != nil {
			return fmt.Errorf("error parsing timestamp argument to --oplogLimit: %v", err)
		}
	}
	if restore.InputOptions.OplogFile != "" {
		if !restore.InputOptions.OplogReplay {
			return fmt.Errorf("cannot use --oplogFile without --oplogReplay enabled")
		}
		if restore.InputOptions.Archive != "" {
			return fmt.Errorf("cannot use --oplogFile with --archive specified")
		}
	}

	// check if we are using a replica set and fall back to w=1 if we aren't (for <= 2.4)
	nodeType, err := restore.SessionProvider.GetNodeType()
	if err != nil {
		return fmt.Errorf("error determining type of connected node: %v", err)
	}

	log.Logvf(log.DebugLow, "connected to node type: %v", nodeType)
	restore.safety, err = db.BuildWriteConcern(restore.OutputOptions.WriteConcern, nodeType)
	if err != nil {
		return fmt.Errorf("error parsing write concern: %v", err)
	}

	// deprecations with --nsInclude --nsExclude
	if restore.NSOptions.DB != "" || restore.NSOptions.Collection != "" {
		// these are only okay if restoring from a bson file
		_, fileType := restore.getInfoFromFilename(restore.TargetDirectory)
		if fileType != BSONFileType {
			log.Logvf(log.Always, "the --db and --collection args should only be used when "+
				"restoring from a BSON file. Other uses are deprecated and will not exist "+
				"in the future; use --nsInclude instead")
		}
	}
	if len(restore.NSOptions.ExcludedCollections) > 0 ||
		len(restore.NSOptions.ExcludedCollectionPrefixes) > 0 {
		log.Logvf(log.Always, "the --excludeCollections and --excludeCollectionPrefixes options "+
			"are deprecated and will not exist in the future; use --nsExclude instead")
	}
	if restore.InputOptions.OplogReplay {
		if len(restore.NSOptions.NSInclude) > 0 || restore.NSOptions.DB != "" {
			return fmt.Errorf("cannot use --oplogReplay with includes specified")
		}
		if len(restore.NSOptions.NSExclude) > 0 || len(restore.NSOptions.ExcludedCollections) > 0 ||
			len(restore.NSOptions.ExcludedCollectionPrefixes) > 0 {
			return fmt.Errorf("cannot use --oplogReplay with excludes specified")
		}
		if len(restore.NSOptions.NSFrom) > 0 {
			return fmt.Errorf("cannot use --oplogReplay with namespace renames specified")
		}
	}

	includes := restore.NSOptions.NSInclude
	if restore.NSOptions.DB != "" && restore.NSOptions.Collection != "" {
		includes = append(includes, ns.Escape(restore.NSOptions.DB)+"."+
			restore.NSOptions.Collection)
	} else if restore.NSOptions.DB != "" {
		includes = append(includes, ns.Escape(restore.NSOptions.DB)+".*")
	}
	if len(includes) == 0 {
		includes = []string{"*"}
	}
	restore.includer, err = ns.NewMatcher(includes)
	if err != nil {
		return fmt.Errorf("invalid includes: %v", err)
	}

	if len(restore.NSOptions.ExcludedCollections) > 0 && restore.NSOptions.Collection != "" {
		return fmt.Errorf("--collection is not allowed when --excludeCollection is specified")
	}
	if len(restore.NSOptions.ExcludedCollectionPrefixes) > 0 && restore.NSOptions.Collection != "" {
		return fmt.Errorf("--collection is not allowed when --excludeCollectionsWithPrefix is specified")
	}
	excludes := restore.NSOptions.NSExclude
	for _, col := range restore.NSOptions.ExcludedCollections {
		excludes = append(excludes, "*."+ns.Escape(col))
	}
	for _, colPrefix := range restore.NSOptions.ExcludedCollectionPrefixes {
		excludes = append(excludes, "*."+ns.Escape(colPrefix)+"*")
	}
	restore.excluder, err = ns.NewMatcher(excludes)
	if err != nil {
		return fmt.Errorf("invalid excludes: %v", err)
	}

	if len(restore.NSOptions.NSFrom) != len(restore.NSOptions.NSTo) {
		return fmt.Errorf("--nsFrom and --nsTo arguments must be specified an equal number of times")
	}
	restore.renamer, err = ns.NewRenamer(restore.NSOptions.NSFrom, restore.NSOptions.NSTo)
	if err != nil {
		return fmt.Errorf("invalid renames: %v", err)
	}

	if restore.OutputOptions.NumInsertionWorkers < 0 {
		return fmt.Errorf(
			"cannot specify a negative number of insertion workers per collection")
	}

	// a single dash signals reading from stdin
	if restore.TargetDirectory == "-" {
		if restore.InputOptions.Archive != "" {
			return fmt.Errorf(
				"cannot restore from \"-\" when --archive is specified")
		}
		if restore.NSOptions.Collection == "" {
			return fmt.Errorf("cannot restore from stdin without a specified collection")
		}
	}
	if restore.stdin == nil {
		restore.stdin = os.Stdin
	}

	return nil
}
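A worked sketch of the matcher patterns the code above ends up building for a few flag combinations (database and collection names are illustrative; note that only the database part is passed through ns.Escape):

	--db shop --collection orders            -> includes: "shop.orders"
	--db shop                                -> includes: "shop.*"
	no --db, --collection, or --nsInclude    -> includes: "*"
	--excludeCollection logs                 -> excludes: "*.logs"
	--excludeCollectionsWithPrefix tmp       -> excludes: "*.tmp*"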
Example #11
// Restore runs the mongorestore program.
func (restore *MongoRestore) Restore() error {
	var target archive.DirLike
	err := restore.ParseAndValidateOptions()
	if err != nil {
		log.Logvf(log.DebugLow, "got error from options parsing: %v", err)
		return err
	}

	// Build up all intents to be restored
	restore.manager = intents.NewIntentManager()
	if restore.InputOptions.Archive == "" && restore.InputOptions.OplogReplay {
		restore.manager.SetSmartPickOplog(true)
	}

	if restore.InputOptions.Archive != "" {
		archiveReader, err := restore.getArchiveReader()
		if err != nil {
			return err
		}
		restore.archive = &archive.Reader{
			In:      archiveReader,
			Prelude: &archive.Prelude{},
		}
		err = restore.archive.Prelude.Read(restore.archive.In)
		if err != nil {
			return err
		}
		log.Logvf(log.DebugLow, `archive format version "%v"`, restore.archive.Prelude.Header.FormatVersion)
		log.Logvf(log.DebugLow, `archive server version "%v"`, restore.archive.Prelude.Header.ServerVersion)
		log.Logvf(log.DebugLow, `archive tool version "%v"`, restore.archive.Prelude.Header.ToolVersion)
		target, err = restore.archive.Prelude.NewPreludeExplorer()
		if err != nil {
			return err
		}
	} else if restore.TargetDirectory != "-" {
		var usedDefaultTarget bool
		if restore.TargetDirectory == "" {
			restore.TargetDirectory = "dump"
			log.Logv(log.Always, "using default 'dump' directory")
			usedDefaultTarget = true
		}
		target, err = newActualPath(restore.TargetDirectory)
		if err != nil {
			if usedDefaultTarget {
				log.Logv(log.Always, "see mongorestore --help for usage information")
			}
			return fmt.Errorf("mongorestore target '%v' invalid: %v", restore.TargetDirectory, err)
		}
		// handle cases where the user passes in a file instead of a directory
		if !target.IsDir() {
			log.Logv(log.DebugLow, "mongorestore target is a file, not a directory")
			err = restore.handleBSONInsteadOfDirectory(restore.TargetDirectory)
			if err != nil {
				return err
			}
		} else {
			log.Logv(log.DebugLow, "mongorestore target is a directory, not a file")
		}
	}
	if restore.NSOptions.Collection != "" &&
		restore.OutputOptions.NumParallelCollections > 1 &&
		restore.OutputOptions.NumInsertionWorkers == 1 {
		// handle special parallelization case when we are only restoring one collection
		// by mapping -j to insertion workers rather than parallel collections
		log.Logvf(log.DebugHigh,
			"setting number of insertions workers to number of parallel collections (%v)",
			restore.OutputOptions.NumParallelCollections)
		restore.OutputOptions.NumInsertionWorkers = restore.OutputOptions.NumParallelCollections
	}
	if restore.InputOptions.Archive != "" {
		if int(restore.archive.Prelude.Header.ConcurrentCollections) > restore.OutputOptions.NumParallelCollections {
			restore.OutputOptions.NumParallelCollections = int(restore.archive.Prelude.Header.ConcurrentCollections)
			restore.OutputOptions.NumInsertionWorkers = int(restore.archive.Prelude.Header.ConcurrentCollections)
			log.Logvf(log.Always,
				"setting number of parallel collections to number of parallel collections in archive (%v)",
				restore.archive.Prelude.Header.ConcurrentCollections,
			)
		}
	}

	// Create the demux before intent creation, because muted archive intents need
	// to register themselves with the demux directly
	if restore.InputOptions.Archive != "" {
		restore.archive.Demux = &archive.Demultiplexer{
			In: restore.archive.In,
		}
	}

	switch {
	case restore.InputOptions.Archive != "":
		log.Logvf(log.Always, "preparing collections to restore from")
		err = restore.CreateAllIntents(target)
	case restore.NSOptions.DB != "" && restore.NSOptions.Collection == "":
		log.Logvf(log.Always,
			"building a list of collections to restore from %v dir",
			target.Path())
		err = restore.CreateIntentsForDB(
			restore.NSOptions.DB,
			target,
		)
	case restore.NSOptions.DB != "" && restore.NSOptions.Collection != "" && restore.TargetDirectory == "-":
		log.Logvf(log.Always, "setting up a collection to be read from standard input")
		err = restore.CreateStdinIntentForCollection(
			restore.NSOptions.DB,
			restore.NSOptions.Collection,
		)
	case restore.NSOptions.DB != "" && restore.NSOptions.Collection != "":
		log.Logvf(log.Always, "checking for collection data in %v", target.Path())
		err = restore.CreateIntentForCollection(
			restore.NSOptions.DB,
			restore.NSOptions.Collection,
			target,
		)
	default:
		log.Logvf(log.Always, "preparing collections to restore from")
		err = restore.CreateAllIntents(target)
	}
	if err != nil {
		return fmt.Errorf("error scanning filesystem: %v", err)
	}

	if restore.isMongos && restore.manager.HasConfigDBIntent() && restore.NSOptions.DB == "" {
		return fmt.Errorf("cannot do a full restore on a sharded system - " +
			"remove the 'config' directory from the dump directory first")
	}

	if restore.InputOptions.OplogFile != "" {
		err = restore.CreateIntentForOplog()
		if err != nil {
			return fmt.Errorf("error reading oplog file: %v", err)
		}
	}
	if restore.InputOptions.OplogReplay && restore.manager.Oplog() == nil {
		return fmt.Errorf("no oplog file to replay; make sure you run mongodump with --oplog")
	}
	if restore.manager.GetOplogConflict() {
		return fmt.Errorf("cannot provide both an oplog.bson file and an oplog file with --oplogFile, " +
			"nor can you provide both a local/oplog.rs.bson and a local/oplog.$main.bson file.")
	}

	conflicts := restore.manager.GetDestinationConflicts()
	if len(conflicts) > 0 {
		for _, conflict := range conflicts {
			log.Logvf(log.Always, "%s", conflict.Error())
		}
		return fmt.Errorf("cannot restore with conflicting namespace destinations")
	}

	if restore.OutputOptions.DryRun {
		log.Logvf(log.Always, "dry run completed")
		return nil
	}

	if restore.InputOptions.Archive != "" {
		namespaceChan := make(chan string, 1)
		namespaceErrorChan := make(chan error)
		restore.archive.Demux.NamespaceChan = namespaceChan
		restore.archive.Demux.NamespaceErrorChan = namespaceErrorChan

		go restore.archive.Demux.Run()
		// consume the new namespace announcement from the demux for all of the special collections
		// that get cached when being read out of the archive.
		// The first regular collection found gets pushed back on to the namespaceChan
		for {
			ns, ok := <-namespaceChan
			// the archive can have only special collections. In that case we keep reading until
			// the namespaces are exhausted, indicated by the namespaceChan being closed.
			if !ok {
				break
			}
			intent := restore.manager.IntentForNamespace(ns)
			if intent == nil {
				return fmt.Errorf("no intent for collection in archive: %v", ns)
			}
			if intent.IsSystemIndexes() ||
				intent.IsUsers() ||
				intent.IsRoles() ||
				intent.IsAuthVersion() {
				log.Logvf(log.DebugLow, "special collection %v found", ns)
				namespaceErrorChan <- nil
			} else {
				// Put the ns back on the announcement chan so that the
				// demultiplexer can start correctly
				log.Logvf(log.DebugLow, "first non special collection %v found."+
					" The demultiplexer will handle it and the remainder", ns)
				namespaceChan <- ns
				break
			}
		}
	}

	// If restoring users and roles, make sure we validate auth versions
	if restore.ShouldRestoreUsersAndRoles() {
		log.Logv(log.Info, "comparing auth version of the dump directory and target server")
		restore.authVersions.Dump, err = restore.GetDumpAuthVersion()
		if err != nil {
			return fmt.Errorf("error getting auth version from dump: %v", err)
		}
		restore.authVersions.Server, err = auth.GetAuthVersion(restore.SessionProvider)
		if err != nil {
			return fmt.Errorf("error getting auth version of server: %v", err)
		}
		err = restore.ValidateAuthVersions()
		if err != nil {
			return fmt.Errorf(
				"the users and roles collections in the dump have an incompatible auth version with target server: %v",
				err)
		}
	}

	err = restore.LoadIndexesFromBSON()
	if err != nil {
		return fmt.Errorf("restore error: %v", err)
	}

	// Restore the regular collections
	if restore.InputOptions.Archive != "" {
		restore.manager.UsePrioritizer(restore.archive.Demux.NewPrioritizer(restore.manager))
	} else if restore.OutputOptions.NumParallelCollections > 1 {
		restore.manager.Finalize(intents.MultiDatabaseLTF)
	} else {
		// use legacy restoration order if we are single-threaded
		restore.manager.Finalize(intents.Legacy)
	}

	restore.termChan = make(chan struct{})

	if err := restore.RestoreIntents(); err != nil {
		return err
	}

	// Restore users/roles
	if restore.ShouldRestoreUsersAndRoles() {
		err = restore.RestoreUsersOrRoles(restore.manager.Users(), restore.manager.Roles())
		if err != nil {
			return fmt.Errorf("restore error: %v", err)
		}
	}

	// Restore oplog
	if restore.InputOptions.OplogReplay {
		err = restore.RestoreOplog()
		if err != nil {
			return fmt.Errorf("restore error: %v", err)
		}
	}

	log.Logv(log.Always, "done")

	return nil
}
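As a concrete instance of the single-collection parallelization case above: running mongorestore with --db shop --collection orders and -j 4, with the insertion-worker count left at 1, restores only one collection, so the 4 is remapped from parallel collections to insertion workers (the database and collection names are illustrative).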
Example #12
// RestoreOplog attempts to restore a MongoDB oplog.
func (restore *MongoRestore) RestoreOplog() error {
	log.Logv(log.Always, "replaying oplog")
	intent := restore.manager.Oplog()
	if intent == nil {
		// this should not be reached
		log.Logv(log.Always, "no oplog file provided, skipping oplog application")
		return nil
	}
	if err := intent.BSONFile.Open(); err != nil {
		return err
	}
	defer intent.BSONFile.Close()
	// NewBufferlessBSONSource reads each bson document into its own buffer
	// because bson.Unmarshal currently can't unmarshal binary types without
	// them referencing the source buffer
	bsonSource := db.NewDecodedBSONSource(db.NewBufferlessBSONSource(intent.BSONFile))
	defer bsonSource.Close()

	rawOplogEntry := &bson.Raw{}

	var totalOps int64
	var entrySize int

	oplogProgressor := progress.NewCounter(intent.BSONSize)
	if restore.ProgressManager != nil {
		restore.ProgressManager.Attach("oplog", oplogProgressor)
		defer restore.ProgressManager.Detach("oplog")
	}

	session, err := restore.SessionProvider.GetSession()
	if err != nil {
		return fmt.Errorf("error establishing connection: %v", err)
	}
	defer session.Close()

	for bsonSource.Next(rawOplogEntry) {
		entrySize = len(rawOplogEntry.Data)

		entryAsOplog := db.Oplog{}
		err = bson.Unmarshal(rawOplogEntry.Data, &entryAsOplog)
		if err != nil {
			return fmt.Errorf("error reading oplog: %v", err)
		}
		if entryAsOplog.Operation == "n" {
			//skip no-ops
			continue
		}
		if !restore.TimestampBeforeLimit(entryAsOplog.Timestamp) {
			log.Logvf(
				log.DebugLow,
				"timestamp %v is not below limit of %v; ending oplog restoration",
				entryAsOplog.Timestamp,
				restore.oplogLimit,
			)
			break
		}

		totalOps++
		oplogProgressor.Inc(int64(entrySize))
		err = restore.ApplyOps(session, []interface{}{entryAsOplog})
		if err != nil {
			return fmt.Errorf("error applying oplog: %v", err)
		}
	}

	log.Logvf(log.Info, "applied %v ops", totalOps)
	return nil

}
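Example #15 below shows a variant of this same function that additionally buffers decoded entries and flushes them through ApplyOps in batches capped at oplogMaxCommandSize, instead of applying each entry individually as done here.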
Example #13
func main() {
	// initialize command-line opts
	opts := options.New("bsondump", bsondump.Usage, options.EnabledOptions{})
	bsonDumpOpts := &bsondump.BSONDumpOptions{}
	opts.AddOptions(bsonDumpOpts)

	args, err := opts.Parse()
	if err != nil {
		log.Logvf(log.Always, "error parsing command line options: %v", err)
		log.Logvf(log.Always, "try 'bsondump --help' for more information")
		os.Exit(util.ExitBadOptions)
	}

	// print help, if specified
	if opts.PrintHelp(false) {
		return
	}

	// print version, if specified
	if opts.PrintVersion() {
		return
	}

	log.SetVerbosity(opts.Verbosity)
	signals.Handle()

	if len(args) > 1 {
		log.Logvf(log.Always, "too many positional arguments: %v", args)
		log.Logvf(log.Always, "try 'bsondump --help' for more information")
		os.Exit(util.ExitBadOptions)
	}

	// If the user specified a bson input file
	if len(args) == 1 {
		if bsonDumpOpts.BSONFileName != "" {
			log.Logvf(log.Always, "Cannot specify both a positional argument and --bsonFile")
			os.Exit(util.ExitBadOptions)
		}

		bsonDumpOpts.BSONFileName = args[0]
	}

	dumper := bsondump.BSONDump{
		ToolOptions:     opts,
		BSONDumpOptions: bsonDumpOpts,
	}

	reader, err := bsonDumpOpts.GetBSONReader()
	if err != nil {
		log.Logvf(log.Always, "Getting BSON Reader Failed: %v", err)
		os.Exit(util.ExitError)
	}
	dumper.BSONSource = db.NewBSONSource(reader)
	defer dumper.BSONSource.Close()

	writer, err := bsonDumpOpts.GetWriter()
	if err != nil {
		log.Logvf(log.Always, "Getting Writer Failed: %v", err)
		os.Exit(util.ExitError)
	}
	dumper.Out = writer
	defer dumper.Out.Close()

	log.Logvf(log.DebugLow, "running bsondump with --objcheck: %v", bsonDumpOpts.ObjCheck)

	if len(bsonDumpOpts.Type) != 0 && bsonDumpOpts.Type != "debug" && bsonDumpOpts.Type != "json" {
		log.Logvf(log.Always, "Unsupported output type '%v'. Must be either 'debug' or 'json'", bsonDumpOpts.Type)
		os.Exit(util.ExitBadOptions)
	}

	var numFound int
	if bsonDumpOpts.Type == "debug" {
		numFound, err = dumper.Debug()
	} else {
		numFound, err = dumper.JSON()
	}

	log.Logvf(log.Always, "%v objects found", numFound)
	if err != nil {
		log.Logv(log.Always, err.Error())
		os.Exit(util.ExitError)
	}
}
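Typical invocations implied by the option handling above (file paths are illustrative):

	bsondump --type=json dump/shop/orders.bson > orders.json
	bsondump --objcheck --type=debug dump/shop/orders.bson
	bsondump --type=json --bsonFile=dump/shop/orders.bson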
Example #14
// CreateAllIntents drills down into a dump folder, creating intents for all of
// the databases and collections it finds.
func (restore *MongoRestore) CreateAllIntents(dir archive.DirLike) error {
	log.Logvf(log.DebugHigh, "using %v as dump root directory", dir.Path())
	entries, err := dir.ReadDir()
	if err != nil {
		return fmt.Errorf("error reading root dump folder: %v", err)
	}
	for _, entry := range entries {
		if entry.IsDir() {
			if err = util.ValidateDBName(entry.Name()); err != nil {
				return fmt.Errorf("invalid database name '%v': %v", entry.Name(), err)
			}
			err = restore.CreateIntentsForDB(entry.Name(), entry)
			if err != nil {
				return err
			}
		} else {
			if entry.Name() == "oplog.bson" {
				if restore.InputOptions.OplogReplay {
					log.Logv(log.DebugLow, "found oplog.bson file to replay")
				}
				oplogIntent := &intents.Intent{
					C:        "oplog",
					Size:     entry.Size(),
					Location: entry.Path(),
				}
				if !restore.InputOptions.OplogReplay {
					if restore.InputOptions.Archive != "" {
						mutedOut := &archive.MutedCollection{
							Intent: oplogIntent,
							Demux:  restore.archive.Demux,
						}
						restore.archive.Demux.Open(
							oplogIntent.Namespace(),
							mutedOut,
						)
					}
					continue
				}
				if restore.InputOptions.Archive != "" {
					if restore.InputOptions.Archive == "-" {
						oplogIntent.Location = "archive on stdin"
					} else {
						oplogIntent.Location = fmt.Sprintf("archive '%v'", restore.InputOptions.Archive)
					}

					// no need to check that we want to cache here
					oplogIntent.BSONFile = &archive.RegularCollectionReceiver{
						Intent: oplogIntent,
						Origin: oplogIntent.Namespace(),
						Demux:  restore.archive.Demux,
					}
				} else {
					oplogIntent.BSONFile = &realBSONFile{path: entry.Path(), intent: oplogIntent, gzip: restore.InputOptions.Gzip}
				}
				restore.manager.Put(oplogIntent)
			} else {
				log.Logvf(log.Always, `don't know what to do with file "%v", skipping...`, entry.Path())
			}
		}
	}
	return nil
}
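A sketch of the dump layout this function walks, with illustrative names: database subdirectories are handed to CreateIntentsForDB, a top-level oplog.bson becomes the oplog intent (replayed only with --oplogReplay), and any other top-level file is skipped with a warning.

	dump/
		oplog.bson
		shop/
			orders.bson
			orders.metadata.json
		admin/
			system.version.bson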
Example #15
// RestoreOplog attempts to restore a MongoDB oplog.
func (restore *MongoRestore) RestoreOplog() error {
	log.Logv(log.Always, "replaying oplog")
	intent := restore.manager.Oplog()
	if intent == nil {
		// this should not be reached
		log.Logv(log.Always, "no oplog file provided, skipping oplog application")
		return nil
	}
	if err := intent.BSONFile.Open(); err != nil {
		return err
	}
	defer intent.BSONFile.Close()
	// NewBufferlessBSONSource reads each bson document into its own buffer
	// because bson.Unmarshal currently can't unmarshal binary types without
	// them referencing the source buffer
	bsonSource := db.NewDecodedBSONSource(db.NewBufferlessBSONSource(intent.BSONFile))
	defer bsonSource.Close()

	entryArray := make([]interface{}, 0, 1024)
	rawOplogEntry := &bson.Raw{}

	var totalOps int64
	var entrySize, bufferedBytes int

	oplogProgressor := progress.NewCounter(intent.BSONSize)
	if restore.ProgressManager != nil {
		restore.ProgressManager.Attach("oplog", oplogProgressor)
		defer restore.ProgressManager.Detach("oplog")
	}

	session, err := restore.SessionProvider.GetSession()
	if err != nil {
		return fmt.Errorf("error establishing connection: %v", err)
	}
	defer session.Close()

	// To restore the oplog, we iterate over the oplog entries,
	// filling up a buffer. Once the buffer reaches max document size,
	// apply the current buffered ops and reset the buffer.
	for bsonSource.Next(rawOplogEntry) {
		entrySize = len(rawOplogEntry.Data)
		if bufferedBytes+entrySize > oplogMaxCommandSize {
			err = restore.ApplyOps(session, entryArray)
			if err != nil {
				return fmt.Errorf("error applying oplog: %v", err)
			}
			entryArray = make([]interface{}, 0, 1024)
			bufferedBytes = 0
		}

		entryAsOplog := db.Oplog{}
		err = bson.Unmarshal(rawOplogEntry.Data, &entryAsOplog)
		if err != nil {
			return fmt.Errorf("error reading oplog: %v", err)
		}
		if entryAsOplog.Operation == "n" {
			//skip no-ops
			continue
		}
		if !restore.TimestampBeforeLimit(entryAsOplog.Timestamp) {
			log.Logvf(
				log.DebugLow,
				"timestamp %v is not below limit of %v; ending oplog restoration",
				entryAsOplog.Timestamp,
				restore.oplogLimit,
			)
			break
		}

		totalOps++
		bufferedBytes += entrySize
		oplogProgressor.Inc(int64(entrySize))
		entryArray = append(entryArray, entryAsOplog)
	}
	// finally, flush the remaining entries
	if len(entryArray) > 0 {
		err = restore.ApplyOps(session, entryArray)
		if err != nil {
			return fmt.Errorf("error applying oplog: %v", err)
		}
	}

	log.Logvf(log.Info, "applied %v ops", totalOps)
	return nil

}
Example #16
// Dump handles some final options checking and executes MongoDump.
func (dump *MongoDump) Dump() (err error) {
	defer dump.sessionProvider.Close()

	dump.shutdownIntentsNotifier = newNotifier()

	if dump.InputOptions.HasQuery() {
		// parse JSON then convert extended JSON values
		var asJSON interface{}
		content, err := dump.InputOptions.GetQuery()
		if err != nil {
			return err
		}
		err = json.Unmarshal(content, &asJSON)
		if err != nil {
			return fmt.Errorf("error parsing query as json: %v", err)
		}
		convertedJSON, err := bsonutil.ConvertJSONValueToBSON(asJSON)
		if err != nil {
			return fmt.Errorf("error converting query to bson: %v", err)
		}
		asMap, ok := convertedJSON.(map[string]interface{})
		if !ok {
			// unlikely to be reached
			return fmt.Errorf("query is not in proper format")
		}
		dump.query = bson.M(asMap)
	}

	if dump.OutputOptions.DumpDBUsersAndRoles {
		// first make sure this is possible with the connected database
		dump.authVersion, err = auth.GetAuthVersion(dump.sessionProvider)
		if err == nil {
			err = auth.VerifySystemAuthVersion(dump.sessionProvider)
		}
		if err != nil {
			return fmt.Errorf("error getting auth schema version for dumpDbUsersAndRoles: %v", err)
		}
		log.Logvf(log.DebugLow, "using auth schema version %v", dump.authVersion)
		if dump.authVersion < 3 {
			return fmt.Errorf("backing up users and roles is only supported for "+
				"deployments with auth schema versions >= 3, found: %v", dump.authVersion)
		}
	}

	if dump.OutputOptions.Archive != "" {
		//getArchiveOut gives us a WriteCloser to which we should write the archive
		var archiveOut io.WriteCloser
		archiveOut, err = dump.getArchiveOut()
		if err != nil {
			return err
		}
		dump.archive = &archive.Writer{
			// The archive.Writer needs its own copy of archiveOut because things
			// like the prelude are not written by the multiplexer.
			Out: archiveOut,
			Mux: archive.NewMultiplexer(archiveOut, dump.shutdownIntentsNotifier),
		}
		go dump.archive.Mux.Run()
		defer func() {
			// The Mux runs until its Control is closed
			close(dump.archive.Mux.Control)
			muxErr := <-dump.archive.Mux.Completed
			archiveOut.Close()
			if muxErr != nil {
				if err != nil {
					err = fmt.Errorf("archive writer: %v / %v", err, muxErr)
				} else {
					err = fmt.Errorf("archive writer: %v", muxErr)
				}
				log.Logvf(log.DebugLow, "%v", err)
			} else {
				log.Logvf(log.DebugLow, "mux completed successfully")
			}
		}()
	}

	// switch on what kind of execution to do
	switch {
	case dump.ToolOptions.DB == "" && dump.ToolOptions.Collection == "":
		err = dump.CreateAllIntents()
	case dump.ToolOptions.DB != "" && dump.ToolOptions.Collection == "":
		err = dump.CreateIntentsForDatabase(dump.ToolOptions.DB)
	case dump.ToolOptions.DB != "" && dump.ToolOptions.Collection != "":
		err = dump.CreateCollectionIntent(dump.ToolOptions.DB, dump.ToolOptions.Collection)
	}
	if err != nil {
		return err
	}

	if dump.OutputOptions.Oplog {
		err = dump.CreateOplogIntents()
		if err != nil {
			return err
		}
	}

	if dump.OutputOptions.DumpDBUsersAndRoles && dump.ToolOptions.DB != "admin" {
		err = dump.CreateUsersRolesVersionIntentsForDB(dump.ToolOptions.DB)
		if err != nil {
			return err
		}
	}

	// verify we can use repair cursors
	if dump.OutputOptions.Repair {
		log.Logv(log.DebugLow, "verifying that the connected server supports repairCursor")
		if dump.isMongos {
			return fmt.Errorf("cannot use --repair on mongos")
		}
		exampleIntent := dump.manager.Peek()
		if exampleIntent != nil {
			supported, err := dump.sessionProvider.SupportsRepairCursor(
				exampleIntent.DB, exampleIntent.C)
			if !supported {
				return err // no extra context needed
			}
		}
	}

	// IO Phase I
	// metadata, users, roles, and versions

	// TODO, either remove this debug or improve the language
	log.Logvf(log.DebugHigh, "dump phase I: metadata, indexes, users, roles, version")

	err = dump.DumpMetadata()
	if err != nil {
		return fmt.Errorf("error dumping metadata: %v", err)
	}

	if dump.OutputOptions.Archive != "" {
		session, err := dump.sessionProvider.GetSession()
		if err != nil {
			return err
		}
		defer session.Close()
		buildInfo, err := session.BuildInfo()
		var serverVersion string
		if err != nil {
			log.Logvf(log.Always, "warning, couldn't get version information from server: %v", err)
			serverVersion = "unknown"
		} else {
			serverVersion = buildInfo.Version
		}
		dump.archive.Prelude, err = archive.NewPrelude(dump.manager, dump.OutputOptions.NumParallelCollections, serverVersion)
		if err != nil {
			return fmt.Errorf("creating archive prelude: %v", err)
		}
		err = dump.archive.Prelude.Write(dump.archive.Out)
		if err != nil {
			return fmt.Errorf("error writing metadata into archive: %v", err)
		}
	}

	err = dump.DumpSystemIndexes()
	if err != nil {
		return fmt.Errorf("error dumping system indexes: %v", err)
	}

	if dump.ToolOptions.DB == "admin" || dump.ToolOptions.DB == "" {
		err = dump.DumpUsersAndRoles()
		if err != nil {
			return fmt.Errorf("error dumping users and roles: %v", err)
		}
	}
	if dump.OutputOptions.DumpDBUsersAndRoles {
		log.Logvf(log.Always, "dumping users and roles for %v", dump.ToolOptions.DB)
		if dump.ToolOptions.DB == "admin" {
			log.Logvf(log.Always, "skipping users/roles dump, already dumped admin database")
		} else {
			err = dump.DumpUsersAndRolesForDB(dump.ToolOptions.DB)
			if err != nil {
				return fmt.Errorf("error dumping users and roles for db: %v", err)
			}
		}
	}

	// If oplog capturing is enabled, we first check the most recent
	// oplog entry and save its timestamp, this will let us later
	// copy all oplog entries that occurred while dumping, creating
	// what is effectively a point-in-time snapshot.
	if dump.OutputOptions.Oplog {
		err := dump.determineOplogCollectionName()
		if err != nil {
			return fmt.Errorf("error finding oplog: %v", err)
		}
		log.Logvf(log.Info, "getting most recent oplog timestamp")
		dump.oplogStart, err = dump.getOplogStartTime()
		if err != nil {
			return fmt.Errorf("error getting oplog start: %v", err)
		}
	}

	if failpoint.Enabled(failpoint.PauseBeforeDumping) {
		time.Sleep(15 * time.Second)
	}

	// IO Phase II
	// regular collections

	// TODO, either remove this debug or improve the language
	log.Logvf(log.DebugHigh, "dump phase II: regular collections")

	// begin dumping intents
	if err := dump.DumpIntents(); err != nil {
		return err
	}

	// IO Phase III
	// oplog

	// TODO, either remove this debug or improve the language
	log.Logvf(log.DebugLow, "dump phase III: the oplog")

	// If we are capturing the oplog, we dump all oplog entries that occurred
	// while dumping the database. Before and after dumping the oplog,
	// we check to see if the oplog has rolled over (i.e. the most recent entry when
	// we started still exist, so we know we haven't lost data)
	if dump.OutputOptions.Oplog {
		log.Logvf(log.DebugLow, "checking if oplog entry %v still exists", dump.oplogStart)
		exists, err := dump.checkOplogTimestampExists(dump.oplogStart)
		if !exists {
			return fmt.Errorf(
				"oplog overflow: mongodump was unable to capture all new oplog entries during execution")
		}
		if err != nil {
			return fmt.Errorf("unable to check oplog for overflow: %v", err)
		}
		log.Logvf(log.DebugHigh, "oplog entry %v still exists", dump.oplogStart)

		log.Logvf(log.Always, "writing captured oplog to %v", dump.manager.Oplog().Location)
		err = dump.DumpOplogAfterTimestamp(dump.oplogStart)
		if err != nil {
			return fmt.Errorf("error dumping oplog: %v", err)
		}

		// check the oplog for a rollover one last time, to avoid a race condition
		// wherein the oplog rolls over in the time after our first check, but before
		// we copy it.
		log.Logvf(log.DebugLow, "checking again if oplog entry %v still exists", dump.oplogStart)
		exists, err = dump.checkOplogTimestampExists(dump.oplogStart)
		if !exists {
			return fmt.Errorf(
				"oplog overflow: mongodump was unable to capture all new oplog entries during execution")
		}
		if err != nil {
			return fmt.Errorf("unable to check oplog for overflow: %v", err)
		}
		log.Logvf(log.DebugHigh, "oplog entry %v still exists", dump.oplogStart)
	}

	log.Logvf(log.DebugLow, "finishing dump")

	return err
}
Example #17
// RestoreIntent attempts to restore a given intent into MongoDB.
func (restore *MongoRestore) RestoreIntent(intent *intents.Intent) error {

	collectionExists, err := restore.CollectionExists(intent)
	if err != nil {
		return fmt.Errorf("error reading database: %v", err)
	}

	if restore.safety == nil && !restore.OutputOptions.Drop && collectionExists {
		log.Logvf(log.Always, "restoring to existing collection %v without dropping", intent.Namespace())
		log.Logv(log.Always, "Important: restored data will be inserted without raising errors; check your server log")
	}

	if restore.OutputOptions.Drop {
		if collectionExists {
			if strings.HasPrefix(intent.C, "system.") {
				log.Logvf(log.Always, "cannot drop system collection %v, skipping", intent.Namespace())
			} else {
				log.Logvf(log.Info, "dropping collection %v before restoring", intent.Namespace())
				err = restore.DropCollection(intent)
				if err != nil {
					return err // no context needed
				}
				collectionExists = false
			}
		} else {
			log.Logvf(log.DebugLow, "collection %v doesn't exist, skipping drop command", intent.Namespace())
		}
	}

	var options bson.D
	var indexes []IndexDocument

	// get indexes from system.indexes dump if we have it but don't have metadata files
	if intent.MetadataFile == nil {
		if _, ok := restore.dbCollectionIndexes[intent.DB]; ok {
			if indexes, ok = restore.dbCollectionIndexes[intent.DB][intent.C]; ok {
				log.Logvf(log.Always, "no metadata; falling back to system.indexes")
			}
		}
	}

	logMessageSuffix := "with no metadata"
	// first create the collection with options from the metadata file
	if intent.MetadataFile != nil {
		logMessageSuffix = "using options from metadata"
		err = intent.MetadataFile.Open()
		if err != nil {
			return err
		}
		defer intent.MetadataFile.Close()

		log.Logvf(log.Always, "reading metadata for %v from %v", intent.Namespace(), intent.MetadataLocation)
		metadata, err := ioutil.ReadAll(intent.MetadataFile)
		if err != nil {
			return fmt.Errorf("error reading metadata from %v: %v", intent.MetadataLocation, err)
		}
		options, indexes, err = restore.MetadataFromJSON(metadata)
		if err != nil {
			return fmt.Errorf("error parsing metadata from %v: %v", intent.MetadataLocation, err)
		}

		if restore.OutputOptions.NoOptionsRestore {
			log.Logv(log.Info, "not restoring collection options")
			logMessageSuffix = "with no collection options"
			options = nil
		}
	}
	if !collectionExists {
		log.Logvf(log.Info, "creating collection %v %s", intent.Namespace(), logMessageSuffix)
		log.Logvf(log.DebugHigh, "using collection options: %#v", options)
		err = restore.CreateCollection(intent, options)
		if err != nil {
			return fmt.Errorf("error creating collection %v: %v", intent.Namespace(), err)
		}
	} else {
		log.Logvf(log.Info, "collection %v already exists - skipping collection create", intent.Namespace())
	}

	var documentCount int64
	if intent.BSONFile != nil {
		err = intent.BSONFile.Open()
		if err != nil {
			return err
		}
		defer intent.BSONFile.Close()

		log.Logvf(log.Always, "restoring %v from %v", intent.Namespace(), intent.Location)

		bsonSource := db.NewDecodedBSONSource(db.NewBSONSource(intent.BSONFile))
		defer bsonSource.Close()

		documentCount, err = restore.RestoreCollectionToDB(intent.DB, intent.C, bsonSource, intent.BSONFile, intent.Size)
		if err != nil {
			return fmt.Errorf("error restoring from %v: %v", intent.Location, err)
		}
	}

	// finally, add indexes
	if len(indexes) > 0 && !restore.OutputOptions.NoIndexRestore {
		log.Logvf(log.Always, "restoring indexes for collection %v from metadata", intent.Namespace())
		err = restore.CreateIndexes(intent, indexes)
		if err != nil {
			return fmt.Errorf("error creating indexes for %v: %v", intent.Namespace(), err)
		}
	} else {
		log.Logv(log.Always, "no indexes to restore")
	}

	log.Logvf(log.Always, "finished restoring %v (%v %v)",
		intent.Namespace(), documentCount, util.Pluralize(int(documentCount), "document", "documents"))
	return nil
}
Example #18
// Run executes the mongooplog program.
func (mo *MongoOplog) Run() error {

	// split up the oplog namespace we are using
	oplogDB, oplogColl, err :=
		util.SplitAndValidateNamespace(mo.SourceOptions.OplogNS)

	if err != nil {
		return err
	}

	// the full oplog namespace needs to be specified
	if oplogColl == "" {
		return fmt.Errorf("the oplog namespace must specify a collection")
	}

	log.Logvf(log.DebugLow, "using oplog namespace `%v.%v`", oplogDB, oplogColl)

	// connect to the destination server
	toSession, err := mo.SessionProviderTo.GetSession()
	if err != nil {
		return fmt.Errorf("error connecting to destination db: %v", err)
	}
	defer toSession.Close()
	toSession.SetSocketTimeout(0)

	// purely for logging
	destServerStr := mo.ToolOptions.Host
	if mo.ToolOptions.Port != "" {
		destServerStr = destServerStr + ":" + mo.ToolOptions.Port
	}
	log.Logvf(log.DebugLow, "successfully connected to destination server `%v`", destServerStr)

	// connect to the source server
	fromSession, err := mo.SessionProviderFrom.GetSession()
	if err != nil {
		return fmt.Errorf("error connecting to source db: %v", err)
	}
	defer fromSession.Close()
	fromSession.SetSocketTimeout(0)

	log.Logvf(log.DebugLow, "successfully connected to source server `%v`", mo.SourceOptions.From)

	// set slave ok
	fromSession.SetMode(mgo.Eventual, true)

	// get the tailing cursor for the source server's oplog
	tail := buildTailingCursor(fromSession.DB(oplogDB).C(oplogColl),
		mo.SourceOptions)
	defer tail.Close()

	// read the cursor dry, applying ops to the destination
	// server in the process
	oplogEntry := &db.Oplog{}
	res := &db.ApplyOpsResponse{}

	log.Logv(log.DebugLow, "applying oplog entries...")

	opCount := 0

	for tail.Next(oplogEntry) {

		// skip noops
		if oplogEntry.Operation == "n" {
			log.Logvf(log.DebugHigh, "skipping no-op for namespace `%v`", oplogEntry.Namespace)
			continue
		}
		opCount++

		// prepare the op to be applied
		opsToApply := []db.Oplog{*oplogEntry}

		// apply the operation
		err := toSession.Run(bson.M{"applyOps": opsToApply}, res)

		if err != nil {
			return fmt.Errorf("error applying ops: %v", err)
		}

		// check the server's response for an issue
		if !res.Ok {
			return fmt.Errorf("server gave error applying ops: %v", res.ErrMsg)
		}
	}

	// make sure there was no tailing error
	if err := tail.Err(); err != nil {
		return fmt.Errorf("error querying oplog: %v", err)
	}

	log.Logvf(log.DebugLow, "done applying %v oplog entries", opCount)

	return nil
}
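A hypothetical invocation, inferred from the options used above: --from names the source server, the standard host/port tool options name the destination, and --oplogns is assumed to be the flag behind SourceOptions.OplogNS.

	mongooplog --from source.example.com:27017 --host dest.example.com --port 27017 --oplogns local.oplog.rs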