func SetUserDetailInfo(sel, set map[string]interface{}) error { c := common.MongoCollection("dudb", "user_detail_info") if err := c.Update(bson.M(sel), bson.M{"$set": bson.M(set)}); err != nil { return errors.As(err, sel, set) } return nil }
func SetUserInfo(sel, set map[string]interface{}) error { c := common.MongoCollection(USER_DB, USER_INFO_TABLE) if err := c.Update(bson.M(sel), bson.M{"$set": bson.M(set)}); err != nil { return errors.As(err, sel, set) } return nil }
func SetTeamMember(sel, set map[string]interface{}) error { c := common.MongoCollection(TEAM_DB, TEAM_MEMBER_TABLE) if err := c.Update(bson.M(sel), bson.M{"$set": bson.M(set)}); err != nil { return errors.As(err, sel, set) } return nil }
// setUnsetUpdateAnnotations returns a bson.D for use // in an annotationsC txn.Op's Update field, containing $set and // $unset operators if the corresponding operands // are non-empty. func setUnsetUpdateAnnotations(set, unset bson.M) bson.D { var update bson.D replace := inSubdocReplacer("annotations") if len(set) > 0 { set = bson.M(copyMap(map[string]interface{}(set), replace)) update = append(update, bson.DocElem{"$set", set}) } if len(unset) > 0 { unset = bson.M(copyMap(map[string]interface{}(unset), replace)) update = append(update, bson.DocElem{"$unset", unset}) } return update }
func (repository MongoRepository) InsertSingle(request *messaging.ObjectRequest) RepositoryResponse { request.Log("Starting INSERT-SINGLE") response := RepositoryResponse{} collection, isError, errorMessage := getMongoConnection(request) key := getMongoDBRecordID(request, nil) if isError == true { response.GetErrorResponse(errorMessage) } else if key != "" { request.Body.Object[request.Body.Parameters.KeyProperty] = key request.Body.Object["_id"] = request.Controls.Namespace + "." + request.Controls.Class + "." + key err := collection.Insert(bson.M(request.Body.Object)) if err != nil { response.IsSuccess = false response.GetErrorResponse("Error inserting one object in mongo" + err.Error()) } else { response.IsSuccess = true response.Message = "Successfully inserted one object in Mongo" request.Log(response.Message) } } var Data []map[string]interface{} Data = make([]map[string]interface{}, 1) var actualData map[string]interface{} actualData = make(map[string]interface{}) actualData["ID"] = key Data[0] = actualData response.Data = Data return response }
func (db *MongoDatabaseSession) GetOneModel(mongoQuery M, result interface{}) error { collectionName := tools.GetInnerTypeName(result) log.Println(collectionName) collection := db.Database.C(collectionName) err := collection.Find(bson.M(mongoQuery)).One(result) return err }
// setUnsetUpdateSettings returns a bson.D for use // in a settingsC txn.Op's Update field, containing // $set and $unset operators if the corresponding // operands are non-empty. func setUnsetUpdateSettings(set, unset bson.M) bson.D { var update bson.D replace := inSubdocReplacer("settings") if len(set) > 0 { set = bson.M(copyMap(map[string]interface{}(set), replace)) update = append(update, bson.DocElem{"$set", set}) } if len(unset) > 0 { unset = bson.M(copyMap(map[string]interface{}(unset), replace)) update = append(update, bson.DocElem{"$unset", unset}) } if len(update) > 0 { update = append(update, bson.DocElem{"$inc", bson.D{{"version", 1}}}) } return update }
// confirm adds to filter confirm clause func confirm(filter model.Fields) bson.M { if filter == nil { filter = model.Fields{} } filter["confirm"] = true return bson.M(filter) }
func ReadAppBuildTaskList(m map[string]interface{}) ([]AppBuildTask, error) { fmt.Println(m) fmt.Println(bson.M(m)) var list []AppBuildTask err := taskCollection.Find(m).All(&list) return list, err }
func (bs *BaseStack) buildUpdateObj(m *stack.Machine, s *DialState, now time.Time) bson.M { obj := object.MetaBuilder.Build(bs.Provider.newMetadata(m)) obj["credential"] = m.Credential.Identifier obj["provider"] = bs.Provider.Name obj["queryString"] = m.QueryString obj["status.modifiedAt"] = now obj["status.state"] = m.State.String() obj["status.reason"] = m.StateReason if s.KiteURL != "" { obj["registerUrl"] = s.KiteURL if u, err := url.Parse(s.KiteURL); err == nil && u.Host != "" { if host, _, err := net.SplitHostPort(u.Host); err == nil { u.Host = host } obj["ipAddress"] = u.Host } } bs.Log.Debug("update object for %q: %+v (%# v)", m.Label, obj, s) return bson.M(obj) }
//mongodb_user func (d *MongoDb) GetUser(user_prop map[string]interface{}) (user utah.User, err error) { c := d.Session.DB(d.dbname).C(d.users_c_name) err = c.Find(bson.M(user_prop)).One(&user) if err != nil { return user, err } return user, nil }
func GetTeamList(sel map[string]interface{}, start, count int) ([]TeamInfo, error) { c := common.MongoCollection(TEAM_DB, TEAM_INFO_TABLE) team_list := []TeamInfo{} err := c.Find(bson.M(sel)).Skip(start).Limit(count).All(&team_list) return team_list, errors.As(err, sel) }
func GetUserInfo(sel map[string]interface{}) (*UserInfo, error) { c := common.MongoCollection(USER_DB, USER_INFO_TABLE) user := &UserInfo{} if err := c.Find(bson.M(sel)).One(user); err != nil { return nil, errors.As(err, sel) } return user, nil }
//GetModels retrieves all the data from mongoDB //mongoQuery is the query from MongoDB query //resultInterface is a slice representing the model required, it will be fill with the result of the query //limit of result if limit < 0 no limit used //skip corresponding the number elements to skip //return an err if soimething bad appened func (db *MongoDatabaseSession) GetModels(mongoQuery M, resultInterface interface{}, limit int, skip int) (interface{}, error) { collectionName := tools.GetInnerTypeName(resultInterface) collection := db.Database.C(collectionName) result := tools.CreatePtrToSliceFromInterface(resultInterface) var err error = nil switch { case limit <= 0 && skip <= 0: err = collection.Find(bson.M(mongoQuery)).All(result) case limit > 0 && skip <= 0: err = collection.Find(bson.M(mongoQuery)).Limit(limit).All(result) case limit <= 0 && skip > 0: err = collection.Find(bson.M(mongoQuery)).Skip(skip).All(result) case limit > 0 && skip > 0: err = collection.Find(bson.M(mongoQuery)).Skip(skip).Limit(limit).All(result) } resultInterface = tools.Dereference(result) return resultInterface, err }
// Fire - the log event func (h *mongoDB) Fire(entry *logrus.Entry) error { entry.Data["Level"] = entry.Level.String() entry.Data["Time"] = entry.Time entry.Data["Message"] = entry.Message mgoErr := h.c.Insert(bson.M(entry.Data)) if mgoErr != nil { return fmt.Errorf("Failed to send log entry to mongodb: %s", mgoErr) } return nil }
// Migrate implements the Database interface. func (db *mongoDatabase) Migrate(opts *MigrateOptions) error { stack := models.NewStackTemplate(opts.Provider, opts.Identifier) stack.Machines = make([]bson.M, len(opts.Machines)) for i := range stack.Machines { stack.Machines[i] = bson.M(machineBuilder.Build(opts.Machines[i])) } account, err := modelhelper.GetAccount(opts.Username) if err != nil { return fmt.Errorf("account lookup failed for %q: %s", opts.Username, err) } sum := sha1.Sum([]byte(opts.Template)) stack.Title = opts.StackName stack.OriginID = account.Id stack.Template.Details = bson.M{ "lastUpdaterId": account.Id, } stack.Group = opts.GroupName stack.Template.Content = opts.Template stack.Template.Sum = hex.EncodeToString(sum[:]) if s, err := yamlReencode(opts.Template); err == nil { stack.Template.RawContent = s } if err := modelhelper.CreateStackTemplate(stack); err != nil { return fmt.Errorf("failed to create stack template: %s", err) } change := bson.M{ "$set": bson.M{ "meta.migration.modifiedAt": time.Now(), "meta.migration.status": MigrationMigrated, "meta.migration.stackTemplateId": stack.Id, }, } for _, id := range opts.MachineIDs { if e := modelhelper.UpdateMachine(id, change); e != nil { err = multierror.Append(err, fmt.Errorf("failed to update migration details for %q: %s", id.Hex(), err)) } } // Failure updating jMachine migration metadata is not critical, // just log the error and continue. if err != nil { opts.Log.Error("%s", err) } return nil }
func main() { my_map := make(map[string]interface{}, 0) my_map["id"] = "222" my_bson_map := bson.M(my_map) test := bson.M{"id": "hello"} fmt.Printf("\n%v\n", test) fmt.Printf("\nNew Bson Map:%v", my_bson_map) }
func (repository MongoRepository) InsertMultiple(request *messaging.ObjectRequest) RepositoryResponse { request.Log("Starting INSERT-MULTIPLE") response := RepositoryResponse{} collection, isError, errorMessage := getMongoConnection(request) var idData map[string]interface{} idData = make(map[string]interface{}) if isError == true { response.GetErrorResponse(errorMessage) } else { isError = false if isError == true { response.IsSuccess = false request.Log("Error inserting multiple objects in Mongo : " + errorMessage) response.GetErrorResponse("Error inserting multiple objects in Mongo" + errorMessage) } else { response.IsSuccess = true response.Message = "Successfully inserted multiple objects in Mongo" request.Log(response.Message) } for i := 0; i < len(request.Body.Objects); i++ { key := getMongoDBRecordID(request, request.Body.Objects[i]) if key == "" { continue } request.Body.Objects[i]["_id"] = request.Controls.Namespace + "." + request.Controls.Class + "." + key request.Body.Objects[i][request.Body.Parameters.KeyProperty] = key idData[strconv.Itoa(i)] = key err := collection.Insert(bson.M(request.Body.Objects[i])) if err != nil { response.IsSuccess = false response.GetErrorResponse("Error inserting many object in mongo" + err.Error()) } else { response.IsSuccess = true response.Message = "Successfully inserted many object in Mongo" request.Log(response.Message) } } } var DataMap []map[string]interface{} DataMap = make([]map[string]interface{}, 1) var actualInput map[string]interface{} actualInput = make(map[string]interface{}) actualInput["ID"] = idData DataMap[0] = actualInput response.Data = DataMap return response }
// mungeInsert takes the value of an txn.Op Insert field and modifies // it to be multi-environment safe, returning the modified document. func (r *multiEnvRunner) mungeInsert(doc interface{}, docID interface{}) (interface{}, error) { switch doc := doc.(type) { case bson.D: return r.mungeBsonD(doc, docID) case bson.M: return doc, r.mungeBsonM(doc, docID) case map[string]interface{}: return doc, r.mungeBsonM(bson.M(doc), docID) default: return doc, r.mungeStruct(doc, docID) } }
// UserSet gets user applying optional filter and modifies the object // it will rise ErrNotFound if user is already confirmed func (s mongoStg) UserSet(ctx context.Context, userid string, fields, filter model.Fields) (*model.User, error) { id, err := toObjectId(userid) if err != nil { return nil, err } if filter == nil { filter = model.Fields{} } filter["_id"] = id c := s.col(ctx) err = c.Update(bson.M(filter), bson.M{"$set": bson.M(fields)}) if err != nil { return nil, err } user := &UserDB{} err = c.FindId(id).One(user) return user.Model(), err }
// ConvertToBSONMapSlice converts an []interface{}, []bson.D, or []bson.M slice to a []bson.M // slice (assuming that all contents are either bson.M or bson.D objects) func ConvertToBSONMapSlice(input interface{}) ([]bson.M, error) { inputBSONM, ok := input.([]bson.M) if ok { return inputBSONM, nil } inputBSOND, ok := input.([]bson.D) if ok { // just convert all of the bson.D documents to bson.M d := make([]bson.M, len(inputBSOND)) for i := 0; i < len(inputBSOND); i++ { doc := inputBSOND[i] d[i] = doc.Map() } return d, nil } inputInterface, ok := input.([]interface{}) if ok { d := make([]bson.M, len(inputInterface)) for i := 0; i < len(inputInterface); i++ { doc := inputInterface[i] docM, ok2 := doc.(bson.M) if !ok2 { // check if it's a bson.D docD, ok3 := doc.(bson.D) if ok3 { docM = docD.Map() } else { docMap, ok4 := doc.(map[string]interface{}) if !ok4 { // error return nil, fmt.Errorf("Slice contents aren't BSON objects") } docM = bson.M(docMap) } } d[i] = docM } return d, nil } return nil, fmt.Errorf("Unsupported input for bson.M slice: %#v\n", input) }
// ToBSONMap converts an interface{} to a bson.M. Nil is returned if the // conversion fails. func ToBSONMap(in interface{}) bson.M { m, ok := in.(bson.M) if ok { return m } d, ok2 := in.(bson.D) if ok2 { return d.Map() } m2, ok3 := in.(map[string]interface{}) if ok3 { return bson.M(m2) } return nil }
// updateOps rewrites the given txn.Ops in place so documents in
// multi-environment collections are scoped to this runner's environment:
// string ids gain the env-UUID prefix and inserted documents are
// rewritten to match. When assertEnvAlive is set and any rewritten
// insert targets a collection in envAliveColls, an op asserting the
// environment is alive is appended. Unsupported insert document types
// panic.
func (r *multiEnvRunner) updateOps(ops []txn.Op) []txn.Op {
	var opsNeedEnvAlive bool
	for i, op := range ops {
		if multiEnvCollections.Contains(op.C) {
			var docID interface{}
			if id, ok := op.Id.(string); ok {
				// String ids get the environment UUID prefix.
				docID = addEnvUUID(r.envUUID, id)
				ops[i].Id = docID
			} else {
				docID = op.Id
			}
			if op.Insert != nil {
				switch doc := op.Insert.(type) {
				case bson.D:
					ops[i].Insert = r.updateBsonD(doc, docID, op.C)
				case bson.M:
					// bson.M is a map, mutated in place — no reassignment needed.
					r.updateBsonM(doc, docID, op.C)
				case map[string]interface{}:
					r.updateBsonM(bson.M(doc), docID, op.C)
				default:
					if !r.updateStruct(doc, docID, op.C) {
						panic(fmt.Sprintf("unsupported document type for multi-environment collection "+
							"(must be bson.D, bson.M or struct). Got %T for insert into %s.", doc, op.C))
					}
				}
				if r.assertEnvAlive && !opsNeedEnvAlive && envAliveColls.Contains(op.C) {
					opsNeedEnvAlive = true
				}
			}
		}
	}
	if opsNeedEnvAlive {
		ops = append(ops, assertEnvAliveOp(r.envUUID))
	}
	return ops
}
// replaceSettingsOp returns a txn.Op that deletes the document's contents and
// replaces it with the supplied values, and a function that should be called on
// txn failure to determine whether this operation failed (due to a concurrent
// settings change).
func replaceSettingsOp(st *State, key string, values map[string]interface{}) (txn.Op, func() (bool, error), error) {
	s, err := readSettings(st, key)
	if err != nil {
		return txn.Op{}, nil, err
	}
	// Any key currently on disk but absent from the new values must be
	// explicitly $unset to achieve full replacement.
	deletes := bson.M{}
	for k := range s.disk {
		if _, found := values[k]; !found {
			deletes[escapeReplacer.Replace(k)] = 1
		}
	}
	// Escape the new keys the same way before writing them back.
	newValues := copyMap(values, escapeReplacer.Replace)
	op := s.assertUnchangedOp()
	op.Update = setUnsetUpdateSettings(bson.M(newValues), deletes)
	// assertFailed re-reads the settings and reports whether the version
	// has moved since our read — i.e. whether a concurrent change caused
	// the txn's unchanged-assertion to fail.
	assertFailed := func() (bool, error) {
		latest, err := readSettings(st, key)
		if err != nil {
			return false, err
		}
		return latest.version != s.version, nil
	}
	return op, assertFailed, nil
}
// getMongoDBRecordID resolves the record ID for an insert. Three modes:
//   - GUID key: id "-888" or Parameters.GUIDKey set — returns a new UUID.
//   - Auto-increment: id "-999" or Parameters.AutoIncrement set — reads
//     and bumps maxCount in the "domainClassAttributes" collection.
//   - Manual: anything else — the caller-supplied id is used verbatim.
// For multi-object requests obj carries the per-object key; for single
// requests obj is nil and the id comes from request.Controls.
// NOTE(review): obj[KeyProperty].(string) panics if the value is absent
// or not a string — confirm callers guarantee this invariant.
func getMongoDBRecordID(request *messaging.ObjectRequest, obj map[string]interface{}) (returnID string) {
	isGUIDKey := false
	isAutoIncrementId := false //else MANUAL key from the user
	if obj == nil {
		//single request
		if (request.Controls.Id == "-999") || (request.Body.Parameters.AutoIncrement == true) {
			isAutoIncrementId = true
		}
		if (request.Controls.Id == "-888") || (request.Body.Parameters.GUIDKey == true) {
			isGUIDKey = true
		}
	} else {
		//multiple requests
		if (obj[request.Body.Parameters.KeyProperty].(string) == "-999") || (request.Body.Parameters.AutoIncrement == true) {
			isAutoIncrementId = true
		}
		if (obj[request.Body.Parameters.KeyProperty].(string) == "-888") || (request.Body.Parameters.GUIDKey == true) {
			isGUIDKey = true
		}
	}
	if isGUIDKey {
		request.Log("GUID Key generation requested!")
		returnID = uuid.NewV1().String()
	} else if isAutoIncrementId {
		request.Log("Automatic Increment Key generation requested!")
		collection, isError, _ := getCustomMongoConnection(request, getSQLnamespace(request), "domainClassAttributes")
		if isError {
			returnID = ""
			request.Log("Connecting to MongoDB Failed!")
		} else {
			//read Attributes table
			key := request.Controls.Class
			var data map[string]interface{}
			err := collection.Find(bson.M{"_id": key}).One(&data)
			fmt.Println(data)
			if err != nil {
				// First use of this class: seed its counter document at 1.
				request.Log("This is a freshly created Class. 
 Inserting new Class record.")
				var ObjectBody map[string]interface{}
				ObjectBody = make(map[string]interface{})
				ObjectBody["_id"] = request.Controls.Class
				ObjectBody["maxCount"] = "1"
				ObjectBody["version"] = uuid.NewV1().String()
				err = collection.Insert(bson.M(ObjectBody))
				if err != nil {
					request.Log("Inserting New DomainClassAttributes failed")
					returnID = ""
				} else {
					returnID = "1"
				}
			} else {
				// Existing class: find maxCount (stored as a string,
				// case-insensitive field name) and increment it.
				var UpdatedCount int
				for fieldName, fieldvalue := range data {
					if strings.ToLower(fieldName) == "maxcount" {
						UpdatedCount, _ = strconv.Atoi(fieldvalue.(string))
						UpdatedCount++
						returnID = strconv.Itoa(UpdatedCount)
						break
					}
				}
				//update the table
				//save to attributes table
				data["maxCount"] = returnID
				data["version"] = uuid.NewV1().String()
				err := collection.Update(bson.M{"_id": key}, bson.M{"$set": data})
				if err != nil {
					request.Log("Update of maxCount Failed")
					returnID = ""
				}
			}
		}
	} else {
		request.Log("Manual Key requested!")
		if obj == nil {
			returnID = request.Controls.Id
		} else {
			returnID = obj[request.Body.Parameters.KeyProperty].(string)
		}
	}
	return
}
// UserSearch looks for a single user matching the given filter
// (e.g. by email and password) and returns its model.
// (The previous comment named the function "UserByCreds" — stale.)
func (s mongoStg) UserSearch(ctx context.Context, filter model.Fields) (*model.User, error) {
	udb := &UserDB{}
	err := s.col(ctx).Find(bson.M(filter)).One(udb)
	return udb.Model(), err
}
func (d *MongoDb) RunQuery(q Query) (result_slice_addr *[]map[string]interface{}, err error) { result_slice := make([]map[string]interface{}, 0) //result_slice = &result_slice_full c := d.Session.DB(d.dbname).C(q.Table) if q.Type == db.CREATE_NEW { if q.QueryBody != nil { err = c.Insert(q.QueryBody) return nil, err } else { return nil, empty_query } } else if q.Type == db.UPDATE || q.Type == db.EDIT { if q.KeyBody == nil { return nil, empty_key } if q.QueryBody == nil { return nil, empty_query } // err = c.Update(bson.M(q.KeyBody), bson.M{"$set": bson.M(q.QueryBody)}) return nil, err // } else if q.Type == db.GET || q.Type == db.GET_ALL || q.Type == db.CHECK_EXIST { if q.KeyBody != nil { if q.Type == db.GET_ALL { err = c.Find(bson.M(q.KeyBody)).All(&result_slice) if err == nil { return &result_slice, err } else { return nil, err } } else { result := make(map[string]interface{}) err = c.Find(bson.M(q.KeyBody)).One(&result) result_slice = append(result_slice, result) if err == nil { return &result_slice, err } else { return nil, err } } } else { return nil, empty_key } } else if q.Type == db.REMOVE { if q.KeyBody != nil { err = c.Remove(bson.M(q.KeyBody)) return nil, err } else { return nil, empty_key } } else if q.Type == db.INSERT_ITEM || q.Type == db.REMOVE_ITEM { if q.KeyBody == nil { return nil, empty_key } if q.QueryBody == nil { return nil, empty_query } if q.Type == db.INSERT_ITEM { err = c.Update(bson.M(q.KeyBody), bson.M{"$push": bson.M(q.QueryBody)}) return nil, err } else { err = c.Update(bson.M(q.KeyBody), bson.M{"$pull": bson.M(q.QueryBody)}) return nil, err } } else { return nil, incorrect_query_type } // err = c.Find(bson.M(q.KeyBody)).One(&result) // if err != nil { // return result,err // } // return result, err }
// modifyProject handles a request to update an existing project ref: it
// decodes the JSON body into a temporary struct, copies the editable
// fields onto the stored ProjectRef, rebuilds the alert config, upserts
// both the ref and its project variables, and replies with the full
// project list.
func (uis *UIServer) modifyProject(w http.ResponseWriter, r *http.Request) {
	_ = MustHaveUser(r)
	vars := mux.Vars(r)
	id := vars["project_id"]
	projectRef, err := model.FindOneProjectRef(id)
	if err != nil {
		uis.LoggedError(w, r, http.StatusInternalServerError, err)
		return
	}
	if projectRef == nil {
		http.Error(w, "Project not found", http.StatusNotFound)
		return
	}
	// Anonymous struct mirroring the JSON payload the UI sends.
	responseRef := struct {
		Identifier         string            `json:"id"`
		DisplayName        string            `json:"display_name"`
		RemotePath         string            `json:"remote_path"`
		BatchTime          int               `json:"batch_time"`
		DeactivatePrevious bool              `json:"deactivate_previous"`
		Branch             string            `json:"branch_name"`
		ProjVarsMap        map[string]string `json:"project_vars"`
		Enabled            bool              `json:"enabled"`
		Private            bool              `json:"private"`
		Owner              string            `json:"owner_name"`
		Repo               string            `json:"repo_name"`
		AlertConfig        map[string][]struct {
			Provider string                 `json:"provider"`
			Settings map[string]interface{} `json:"settings"`
		} `json:"alert_config"`
	}{}
	err = util.ReadJSONInto(r.Body, &responseRef)
	if err != nil {
		http.Error(w, fmt.Sprintf("Error parsing request body %v", err), http.StatusInternalServerError)
		return
	}
	// Copy the editable fields; the identifier always comes from the URL.
	projectRef.DisplayName = responseRef.DisplayName
	projectRef.RemotePath = responseRef.RemotePath
	projectRef.BatchTime = responseRef.BatchTime
	projectRef.Branch = responseRef.Branch
	projectRef.Enabled = responseRef.Enabled
	projectRef.Private = responseRef.Private
	projectRef.Owner = responseRef.Owner
	projectRef.DeactivatePrevious = responseRef.DeactivatePrevious
	projectRef.Repo = responseRef.Repo
	projectRef.Identifier = id
	// The alert config is rebuilt from scratch on every modify.
	projectRef.Alerts = map[string][]model.AlertConfig{}
	for triggerId, alerts := range responseRef.AlertConfig {
		//TODO validate the triggerID, provider, and settings.
		for _, alert := range alerts {
			projectRef.Alerts[triggerId] = append(projectRef.Alerts[triggerId], model.AlertConfig{
				Provider: alert.Provider,
				Settings: bson.M(alert.Settings),
			})
		}
	}
	err = projectRef.Upsert()
	if err != nil {
		uis.LoggedError(w, r, http.StatusInternalServerError, err)
		return
	}
	//modify project vars if necessary
	projectVars := model.ProjectVars{id, responseRef.ProjVarsMap}
	_, err = projectVars.Upsert()
	if err != nil {
		uis.LoggedError(w, r, http.StatusInternalServerError, err)
		return
	}
	allProjects, err := model.FindAllProjectRefs()
	if err != nil {
		uis.LoggedError(w, r, http.StatusInternalServerError, err)
		return
	}
	data := struct {
		AllProjects []model.ProjectRef
	}{allProjects}
	uis.WriteJSON(w, http.StatusOK, data)
}
// Dump handles some final options checking and executes MongoDump.
// The named return err matters: the archive-mux deferred cleanup below
// folds mux errors into it on the way out.
func (dump *MongoDump) Dump() (err error) {
	defer dump.sessionProvider.Close()
	dump.shutdownIntentsNotifier = newNotifier()
	if dump.InputOptions.HasQuery() {
		// parse JSON then convert extended JSON values
		var asJSON interface{}
		content, err := dump.InputOptions.GetQuery()
		if err != nil {
			return err
		}
		err = json.Unmarshal(content, &asJSON)
		if err != nil {
			return fmt.Errorf("error parsing query as json: %v", err)
		}
		convertedJSON, err := bsonutil.ConvertJSONValueToBSON(asJSON)
		if err != nil {
			return fmt.Errorf("error converting query to bson: %v", err)
		}
		asMap, ok := convertedJSON.(map[string]interface{})
		if !ok {
			// unlikely to be reached
			return fmt.Errorf("query is not in proper format")
		}
		dump.query = bson.M(asMap)
	}
	if dump.OutputOptions.DumpDBUsersAndRoles {
		// first make sure this is possible with the connected database
		dump.authVersion, err = auth.GetAuthVersion(dump.sessionProvider)
		if err == nil {
			err = auth.VerifySystemAuthVersion(dump.sessionProvider)
		}
		if err != nil {
			return fmt.Errorf("error getting auth schema version for dumpDbUsersAndRoles: %v", err)
		}
		log.Logvf(log.DebugLow, "using auth schema version %v", dump.authVersion)
		if dump.authVersion < 3 {
			return fmt.Errorf("backing up users and roles is only supported for "+
				"deployments with auth schema versions >= 3, found: %v", dump.authVersion)
		}
	}
	if dump.OutputOptions.Archive != "" {
		//getArchiveOut gives us a WriteCloser to which we should write the archive
		var archiveOut io.WriteCloser
		archiveOut, err = dump.getArchiveOut()
		if err != nil {
			return err
		}
		dump.archive = &archive.Writer{
			// The archive.Writer needs its own copy of archiveOut because things
			// like the prelude are not written by the multiplexer.
			Out: archiveOut,
			Mux: archive.NewMultiplexer(archiveOut, dump.shutdownIntentsNotifier),
		}
		go dump.archive.Mux.Run()
		defer func() {
			// The Mux runs until its Control is closed
			close(dump.archive.Mux.Control)
			muxErr := <-dump.archive.Mux.Completed
			archiveOut.Close()
			if muxErr != nil {
				// Fold the mux error into the function's named return.
				if err != nil {
					err = fmt.Errorf("archive writer: %v / %v", err, muxErr)
				} else {
					err = fmt.Errorf("archive writer: %v", muxErr)
				}
				log.Logvf(log.DebugLow, "%v", err)
			} else {
				log.Logvf(log.DebugLow, "mux completed successfully")
			}
		}()
	}
	// switch on what kind of execution to do
	switch {
	case dump.ToolOptions.DB == "" && dump.ToolOptions.Collection == "":
		err = dump.CreateAllIntents()
	case dump.ToolOptions.DB != "" && dump.ToolOptions.Collection == "":
		err = dump.CreateIntentsForDatabase(dump.ToolOptions.DB)
	case dump.ToolOptions.DB != "" && dump.ToolOptions.Collection != "":
		err = dump.CreateCollectionIntent(dump.ToolOptions.DB, dump.ToolOptions.Collection)
	}
	if err != nil {
		return err
	}
	if dump.OutputOptions.Oplog {
		err = dump.CreateOplogIntents()
		if err != nil {
			return err
		}
	}
	if dump.OutputOptions.DumpDBUsersAndRoles && dump.ToolOptions.DB != "admin" {
		err = dump.CreateUsersRolesVersionIntentsForDB(dump.ToolOptions.DB)
		if err != nil {
			return err
		}
	}
	// verify we can use repair cursors
	if dump.OutputOptions.Repair {
		log.Logv(log.DebugLow, "verifying that the connected server supports repairCursor")
		if dump.isMongos {
			return fmt.Errorf("cannot use --repair on mongos")
		}
		exampleIntent := dump.manager.Peek()
		if exampleIntent != nil {
			supported, err := dump.sessionProvider.SupportsRepairCursor(
				exampleIntent.DB, exampleIntent.C)
			if !supported {
				return err // no extra context needed
			}
		}
	}
	// IO Phase I
	// metadata, users, roles, and versions
	// TODO, either remove this debug or improve the language
	log.Logvf(log.DebugHigh, "dump phase I: metadata, indexes, users, roles, version")
	err = dump.DumpMetadata()
	if err != nil {
		return fmt.Errorf("error dumping metadata: %v", err)
	}
	if dump.OutputOptions.Archive != "" {
		session, err := dump.sessionProvider.GetSession()
		if err != nil {
			return err
		}
		defer session.Close()
		buildInfo, err := session.BuildInfo()
		var serverVersion string
		if err != nil {
			log.Logvf(log.Always, "warning, couldn't get version information from server: %v", err)
			serverVersion = "unknown"
		} else {
			serverVersion = buildInfo.Version
		}
		dump.archive.Prelude, err = archive.NewPrelude(dump.manager, dump.OutputOptions.NumParallelCollections, serverVersion)
		if err != nil {
			return fmt.Errorf("creating archive prelude: %v", err)
		}
		err = dump.archive.Prelude.Write(dump.archive.Out)
		if err != nil {
			return fmt.Errorf("error writing metadata into archive: %v", err)
		}
	}
	err = dump.DumpSystemIndexes()
	if err != nil {
		return fmt.Errorf("error dumping system indexes: %v", err)
	}
	if dump.ToolOptions.DB == "admin" || dump.ToolOptions.DB == "" {
		err = dump.DumpUsersAndRoles()
		if err != nil {
			return fmt.Errorf("error dumping users and roles: %v", err)
		}
	}
	if dump.OutputOptions.DumpDBUsersAndRoles {
		log.Logvf(log.Always, "dumping users and roles for %v", dump.ToolOptions.DB)
		if dump.ToolOptions.DB == "admin" {
			log.Logvf(log.Always, "skipping users/roles dump, already dumped admin database")
		} else {
			err = dump.DumpUsersAndRolesForDB(dump.ToolOptions.DB)
			if err != nil {
				return fmt.Errorf("error dumping users and roles for db: %v", err)
			}
		}
	}
	// If oplog capturing is enabled, we first check the most recent
	// oplog entry and save its timestamp, this will let us later
	// copy all oplog entries that occurred while dumping, creating
	// what is effectively a point-in-time snapshot.
	if dump.OutputOptions.Oplog {
		err := dump.determineOplogCollectionName()
		if err != nil {
			return fmt.Errorf("error finding oplog: %v", err)
		}
		log.Logvf(log.Info, "getting most recent oplog timestamp")
		dump.oplogStart, err = dump.getOplogStartTime()
		if err != nil {
			return fmt.Errorf("error getting oplog start: %v", err)
		}
	}
	if failpoint.Enabled(failpoint.PauseBeforeDumping) {
		time.Sleep(15 * time.Second)
	}
	// IO Phase II
	// regular collections
	// TODO, either remove this debug or improve the language
	log.Logvf(log.DebugHigh, "dump phase II: regular collections")
	// begin dumping intents
	if err := dump.DumpIntents(); err != nil {
		return err
	}
	// IO Phase III
	// oplog
	// TODO, either remove this debug or improve the language
	log.Logvf(log.DebugLow, "dump phase III: the oplog")
	// If we are capturing the oplog, we dump all oplog entries that occurred
	// while dumping the database. Before and after dumping the oplog,
	// we check to see if the oplog has rolled over (i.e. the most recent entry when
	// we started still exist, so we know we haven't lost data)
	if dump.OutputOptions.Oplog {
		log.Logvf(log.DebugLow, "checking if oplog entry %v still exists", dump.oplogStart)
		exists, err := dump.checkOplogTimestampExists(dump.oplogStart)
		if !exists {
			return fmt.Errorf(
				"oplog overflow: mongodump was unable to capture all new oplog entries during execution")
		}
		if err != nil {
			return fmt.Errorf("unable to check oplog for overflow: %v", err)
		}
		log.Logvf(log.DebugHigh, "oplog entry %v still exists", dump.oplogStart)
		log.Logvf(log.Always, "writing captured oplog to %v", dump.manager.Oplog().Location)
		err = dump.DumpOplogAfterTimestamp(dump.oplogStart)
		if err != nil {
			return fmt.Errorf("error dumping oplog: %v", err)
		}
		// check the oplog for a rollover one last time, to avoid a race condition
		// wherein the oplog rolls over in the time after our first check, but before
		// we copy it.
		log.Logvf(log.DebugLow, "checking again if oplog entry %v still exists", dump.oplogStart)
		exists, err = dump.checkOplogTimestampExists(dump.oplogStart)
		if !exists {
			return fmt.Errorf(
				"oplog overflow: mongodump was unable to capture all new oplog entries during execution")
		}
		if err != nil {
			return fmt.Errorf("unable to check oplog for overflow: %v", err)
		}
		log.Logvf(log.DebugHigh, "oplog entry %v still exists", dump.oplogStart)
	}
	log.Logvf(log.DebugLow, "finishing dump")
	return err
}
// updateOps validates and rewrites the given transaction operations so
// every touched document is scoped to this runner's environment. It
// rejects ops against unknown or raw-access collections, prefixes
// string ids with the env UUID for non-global collections, rewrites
// inserted documents accordingly, and appends an env-alive assertion
// when environment-specific documents are inserted without the txn
// already referencing the environment document.
func (r *multiEnvRunner) updateOps(ops []txn.Op) ([]txn.Op, error) {
	var referencesEnviron bool
	var insertsEnvironSpecificDocs bool
	for i, op := range ops {
		info, found := r.schema[op.C]
		if !found {
			return nil, errors.Errorf("forbidden transaction: references unknown collection %q", op.C)
		}
		if info.rawAccess {
			return nil, errors.Errorf("forbidden transaction: references raw-access collection %q", op.C)
		}
		if !info.global {
			// TODO(fwereade): this interface implies we're returning a copy
			// of the transactions -- as I think we should be -- rather than
			// rewriting them in place (which IMO breaks client expectations
			// pretty hard, not to mention rendering us unable to accept any
			// structs passed by value, or which lack an env-uuid field).
			//
			// The counterargument is that it's convenient to use rewritten
			// docs directly to construct entities; I think that's suboptimal,
			// because the cost of a DB read to just grab the actual data pales
			// in the face of the transaction operation itself, and it's a
			// small price to pay for a safer implementation.
			var docID interface{}
			if id, ok := op.Id.(string); ok {
				// String ids get the environment UUID prefix.
				docID = ensureEnvUUID(r.envUUID, id)
				ops[i].Id = docID
			} else {
				docID = op.Id
			}
			if op.Insert != nil {
				var err error
				switch doc := op.Insert.(type) {
				case bson.D:
					ops[i].Insert, err = r.updateBsonD(doc, docID)
				case bson.M:
					// bson.M is mutated in place — no reassignment needed.
					err = r.updateBsonM(doc, docID)
				case map[string]interface{}:
					err = r.updateBsonM(bson.M(doc), docID)
				default:
					err = r.updateStruct(doc, docID)
				}
				if err != nil {
					return nil, errors.Annotatef(err, "cannot insert into %q", op.C)
				}
				if !info.insertWithoutEnvironment {
					insertsEnvironSpecificDocs = true
				}
			}
		}
		if op.C == environmentsC {
			if op.Id == r.envUUID {
				referencesEnviron = true
			}
		}
	}
	if insertsEnvironSpecificDocs && !referencesEnviron {
		// TODO(fwereade): This serializes a large proportion of operations
		// that could otherwise run in parallel. it's quite nice to be able
		// to run more than one transaction per environment at once...
		//
		// Consider representing environ life with a collection of N docs,
		// and selecting different ones per transaction, so as to claw back
		// parallelism of up to N. (Environ dying would update all docs and
		// thus end up serializing everything, but at least we get some
		// benefits for the bulk of an environment's lifetime.)
		ops = append(ops, assertEnvAliveOp(r.envUUID))
	}
	logger.Tracef("rewrote transaction: %#v", ops)
	return ops, nil
}