// parse and convert extended JSON func (mf *MongoFiles) parseID() (interface{}, error) { // parse the id using extended json var asJSON interface{} err := json.Unmarshal([]byte(mf.FileName), &asJSON) if err != nil { return nil, fmt.Errorf( "error parsing _id as json: %v; make sure you are properly escaping input", err) } id, err := bsonutil.ConvertJSONValueToBSON(asJSON) if err != nil { return nil, fmt.Errorf("error converting _id to bson: %v", err) } return id, nil }
// Dump handles some final options checking and executes MongoDump.
//
// The result parameter is named (err) deliberately: the deferred archive
// shutdown closure below reads and rewrites it so that multiplexer errors
// are merged into the value the caller ultimately sees.
func (dump *MongoDump) Dump() (err error) {
	defer dump.sessionProvider.Close()
	dump.shutdownIntentsNotifier = newNotifier()
	if dump.InputOptions.HasQuery() {
		// parse JSON then convert extended JSON values
		var asJSON interface{}
		content, err := dump.InputOptions.GetQuery()
		if err != nil {
			return err
		}
		err = json.Unmarshal(content, &asJSON)
		if err != nil {
			return fmt.Errorf("error parsing query as json: %v", err)
		}
		convertedJSON, err := bsonutil.ConvertJSONValueToBSON(asJSON)
		if err != nil {
			return fmt.Errorf("error converting query to bson: %v", err)
		}
		// A top-level query must be a JSON document, hence a map after
		// conversion; anything else is rejected.
		asMap, ok := convertedJSON.(map[string]interface{})
		if !ok {
			// unlikely to be reached
			return fmt.Errorf("query is not in proper format")
		}
		dump.query = bson.M(asMap)
	}
	if dump.OutputOptions.DumpDBUsersAndRoles {
		// first make sure this is possible with the connected database
		dump.authVersion, err = auth.GetAuthVersion(dump.sessionProvider)
		if err == nil {
			err = auth.VerifySystemAuthVersion(dump.sessionProvider)
		}
		if err != nil {
			return fmt.Errorf("error getting auth schema version for dumpDbUsersAndRoles: %v", err)
		}
		log.Logvf(log.DebugLow, "using auth schema version %v", dump.authVersion)
		if dump.authVersion < 3 {
			return fmt.Errorf("backing up users and roles is only supported for "+
				"deployments with auth schema versions >= 3, found: %v", dump.authVersion)
		}
	}
	if dump.OutputOptions.Archive != "" {
		//getArchiveOut gives us a WriteCloser to which we should write the archive
		var archiveOut io.WriteCloser
		archiveOut, err = dump.getArchiveOut()
		if err != nil {
			return err
		}
		dump.archive = &archive.Writer{
			// The archive.Writer needs its own copy of archiveOut because things
			// like the prelude are not written by the multiplexer.
			Out: archiveOut,
			Mux: archive.NewMultiplexer(archiveOut, dump.shutdownIntentsNotifier),
		}
		go dump.archive.Mux.Run()
		defer func() {
			// The Mux runs until its Control is closed
			close(dump.archive.Mux.Control)
			muxErr := <-dump.archive.Mux.Completed
			archiveOut.Close()
			if muxErr != nil {
				// Merge the multiplexer error into the named return value,
				// preserving any error that was already set on the way out.
				if err != nil {
					err = fmt.Errorf("archive writer: %v / %v", err, muxErr)
				} else {
					err = fmt.Errorf("archive writer: %v", muxErr)
				}
				log.Logvf(log.DebugLow, "%v", err)
			} else {
				log.Logvf(log.DebugLow, "mux completed successfully")
			}
		}()
	}
	// switch on what kind of execution to do
	switch {
	case dump.ToolOptions.DB == "" && dump.ToolOptions.Collection == "":
		err = dump.CreateAllIntents()
	case dump.ToolOptions.DB != "" && dump.ToolOptions.Collection == "":
		err = dump.CreateIntentsForDatabase(dump.ToolOptions.DB)
	case dump.ToolOptions.DB != "" && dump.ToolOptions.Collection != "":
		err = dump.CreateCollectionIntent(dump.ToolOptions.DB, dump.ToolOptions.Collection)
	}
	if err != nil {
		return err
	}
	if dump.OutputOptions.Oplog {
		err = dump.CreateOplogIntents()
		if err != nil {
			return err
		}
	}
	if dump.OutputOptions.DumpDBUsersAndRoles && dump.ToolOptions.DB != "admin" {
		err = dump.CreateUsersRolesVersionIntentsForDB(dump.ToolOptions.DB)
		if err != nil {
			return err
		}
	}
	// verify we can use repair cursors
	if dump.OutputOptions.Repair {
		log.Logv(log.DebugLow, "verifying that the connected server supports repairCursor")
		if dump.isMongos {
			return fmt.Errorf("cannot use --repair on mongos")
		}
		// Any one intent is enough to probe server support for repairCursor.
		exampleIntent := dump.manager.Peek()
		if exampleIntent != nil {
			supported, err := dump.sessionProvider.SupportsRepairCursor(
				exampleIntent.DB, exampleIntent.C)
			if !supported {
				return err // no extra context needed
			}
		}
	}
	// IO Phase I
	// metadata, users, roles, and versions
	// TODO, either remove this debug or improve the language
	log.Logvf(log.DebugHigh, "dump phase I: metadata, indexes, users, roles, version")
	err = dump.DumpMetadata()
	if err != nil {
		return fmt.Errorf("error dumping metadata: %v", err)
	}
	if dump.OutputOptions.Archive != "" {
		session, err := dump.sessionProvider.GetSession()
		if err != nil {
			return err
		}
		defer session.Close()
		buildInfo, err := session.BuildInfo()
		var serverVersion string
		if err != nil {
			// Best effort: a missing server version is recorded as "unknown"
			// rather than aborting the dump.
			log.Logvf(log.Always, "warning, couldn't get version information from server: %v", err)
			serverVersion = "unknown"
		} else {
			serverVersion = buildInfo.Version
		}
		dump.archive.Prelude, err = archive.NewPrelude(dump.manager, dump.OutputOptions.NumParallelCollections, serverVersion)
		if err != nil {
			return fmt.Errorf("creating archive prelude: %v", err)
		}
		// The prelude is written directly to Out, bypassing the multiplexer.
		err = dump.archive.Prelude.Write(dump.archive.Out)
		if err != nil {
			return fmt.Errorf("error writing metadata into archive: %v", err)
		}
	}
	err = dump.DumpSystemIndexes()
	if err != nil {
		return fmt.Errorf("error dumping system indexes: %v", err)
	}
	if dump.ToolOptions.DB == "admin" || dump.ToolOptions.DB == "" {
		err = dump.DumpUsersAndRoles()
		if err != nil {
			return fmt.Errorf("error dumping users and roles: %v", err)
		}
	}
	if dump.OutputOptions.DumpDBUsersAndRoles {
		log.Logvf(log.Always, "dumping users and roles for %v", dump.ToolOptions.DB)
		if dump.ToolOptions.DB == "admin" {
			log.Logvf(log.Always, "skipping users/roles dump, already dumped admin database")
		} else {
			err = dump.DumpUsersAndRolesForDB(dump.ToolOptions.DB)
			if err != nil {
				return fmt.Errorf("error dumping users and roles for db: %v", err)
			}
		}
	}
	// If oplog capturing is enabled, we first check the most recent
	// oplog entry and save its timestamp, this will let us later
	// copy all oplog entries that occurred while dumping, creating
	// what is effectively a point-in-time snapshot.
	if dump.OutputOptions.Oplog {
		err := dump.determineOplogCollectionName()
		if err != nil {
			return fmt.Errorf("error finding oplog: %v", err)
		}
		log.Logvf(log.Info, "getting most recent oplog timestamp")
		dump.oplogStart, err = dump.getOplogStartTime()
		if err != nil {
			return fmt.Errorf("error getting oplog start: %v", err)
		}
	}
	if failpoint.Enabled(failpoint.PauseBeforeDumping) {
		// Test hook: artificial pause so tests can race oplog writes
		// against the dump. Presumably only enabled in testing — confirm.
		time.Sleep(15 * time.Second)
	}
	// IO Phase II
	// regular collections
	// TODO, either remove this debug or improve the language
	log.Logvf(log.DebugHigh, "dump phase II: regular collections")
	// begin dumping intents
	if err := dump.DumpIntents(); err != nil {
		return err
	}
	// IO Phase III
	// oplog
	// TODO, either remove this debug or improve the language
	log.Logvf(log.DebugLow, "dump phase III: the oplog")
	// If we are capturing the oplog, we dump all oplog entries that occurred
	// while dumping the database. Before and after dumping the oplog,
	// we check to see if the oplog has rolled over (i.e. the most recent entry when
	// we started still exist, so we know we haven't lost data)
	if dump.OutputOptions.Oplog {
		log.Logvf(log.DebugLow, "checking if oplog entry %v still exists", dump.oplogStart)
		exists, err := dump.checkOplogTimestampExists(dump.oplogStart)
		// NOTE(review): !exists is tested before err, so a failed existence
		// check (err != nil, exists == false) is reported as "oplog overflow"
		// rather than as the underlying error — confirm this ordering is
		// intentional.
		if !exists {
			return fmt.Errorf(
				"oplog overflow: mongodump was unable to capture all new oplog entries during execution")
		}
		if err != nil {
			return fmt.Errorf("unable to check oplog for overflow: %v", err)
		}
		log.Logvf(log.DebugHigh, "oplog entry %v still exists", dump.oplogStart)
		log.Logvf(log.Always, "writing captured oplog to %v", dump.manager.Oplog().Location)
		err = dump.DumpOplogAfterTimestamp(dump.oplogStart)
		if err != nil {
			return fmt.Errorf("error dumping oplog: %v", err)
		}
		// check the oplog for a rollover one last time, to avoid a race condition
		// wherein the oplog rolls over in the time after our first check, but before
		// we copy it.
		log.Logvf(log.DebugLow, "checking again if oplog entry %v still exists", dump.oplogStart)
		exists, err = dump.checkOplogTimestampExists(dump.oplogStart)
		if !exists {
			return fmt.Errorf(
				"oplog overflow: mongodump was unable to capture all new oplog entries during execution")
		}
		if err != nil {
			return fmt.Errorf("unable to check oplog for overflow: %v", err)
		}
		log.Logvf(log.DebugHigh, "oplog entry %v still exists", dump.oplogStart)
	}
	log.Logvf(log.DebugLow, "finishing dump")
	// err may still be rewritten by the deferred archive closer above.
	return err
}