Example #1
func ParseReadPreference(rp string) (mgo.Mode, bson.D, error) {
	var mode string
	var tags bson.D
	if rp == "" {
		return mgo.Nearest, nil, nil
	}
	if rp[0] != '{' {
		mode = rp
	} else {
		var doc readPrefDoc
		err := json.Unmarshal([]byte(rp), &doc)
		if err != nil {
			return 0, nil, fmt.Errorf("invalid --ReadPreferences json object: %v", err)
		}
		tags = doc.Tags
		mode = doc.Mode
	}
	switch mode {
	case "primary":
		return mgo.Primary, tags, nil
	case "primaryPreferred":
		return mgo.PrimaryPreferred, tags, nil
	case "secondary":
		return mgo.Secondary, tags, nil
	case "secondaryPreferred":
		return mgo.SecondaryPreferred, tags, nil
	case "nearest":
		return mgo.Nearest, tags, nil
	}
	return 0, nil, fmt.Errorf("invalid readPreference mode '%v'", mode)
}
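A minimal caller sketch, not part of the original example: it assumes ParseReadPreference is visible from the calling package, omits imports to match the listing style, and exercises both input forms the function accepts, a bare mode string and a JSON document with a "mode" field.

func exampleParseReadPreference() error {
	// Bare mode string.
	mode, _, err := ParseReadPreference("secondaryPreferred")
	if err != nil {
		return err
	}
	fmt.Println("mode:", mode)

	// JSON document form; a tag set would ride along in the same document.
	mode, tags, err := ParseReadPreference(`{"mode": "nearest"}`)
	if err != nil {
		return err
	}
	fmt.Println("mode:", mode, "tags:", tags)
	return nil
}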
Example #2
func TestKerberos(t *testing.T) {
	testutil.VerifyTestType(t, testutil.KerberosTestType)

	Convey("Should be able to run mongoexport with Kerberos auth", t, func() {
		opts, err := testutil.GetKerberosOptions()

		So(err, ShouldBeNil)

		sessionProvider, err := db.NewSessionProvider(*opts)
		So(err, ShouldBeNil)

		export := MongoExport{
			ToolOptions:     *opts,
			OutputOpts:      &OutputFormatOptions{},
			InputOpts:       &InputOptions{},
			SessionProvider: sessionProvider,
		}

		var out bytes.Buffer
		num, err := export.exportInternal(&out)

		So(err, ShouldBeNil)
		So(num, ShouldEqual, 1)
		outputLines := strings.Split(strings.TrimSpace(out.String()), "\n")
		So(len(outputLines), ShouldEqual, 1)
		outMap := map[string]interface{}{}
		So(json.Unmarshal([]byte(outputLines[0]), &outMap), ShouldBeNil)
		So(outMap["kerberos"], ShouldEqual, true)
		So(outMap["authenticated"], ShouldEqual, "yeah")
		So(outMap["_id"].(map[string]interface{})["$oid"], ShouldEqual, "528fb35afb3a8030e2f643c3")
	})
}
Example #3
func TestJSONArray(t *testing.T) {
	testutil.VerifyTestType(t, testutil.UnitTestType)

	Convey("With a JSON export output in array mode", t, func() {
		out := &bytes.Buffer{}
		Convey("exporting a bunch of documents should produce valid json", func() {
			jsonExporter := NewJSONExportOutput(true, false, out)
			err := jsonExporter.WriteHeader()
			So(err, ShouldBeNil)

			// Export a few docs of various types

			testObjs := []interface{}{bson.NewObjectId(), "asd", 12345, 3.14159, bson.M{"A": 1}}
			for _, obj := range testObjs {
				err = jsonExporter.ExportDocument(bson.M{"_id": obj})
				So(err, ShouldBeNil)
			}

			err = jsonExporter.WriteFooter()
			So(err, ShouldBeNil)
			// Unmarshal the whole thing, it should be valid json
			fromJSON := []map[string]interface{}{}
			err = json.Unmarshal(out.Bytes(), &fromJSON)
			So(err, ShouldBeNil)
			So(len(fromJSON), ShouldEqual, len(testObjs))

		})

		Reset(func() {
			out.Reset()
		})

	})
}
Example #4
// MetadataFromJSON takes a slice of JSON bytes and unmarshals them into usable
// collection options and indexes for restoring collections.
func (restore *MongoRestore) MetadataFromJSON(jsonBytes []byte) (bson.D, []IndexDocument, error) {
	if len(jsonBytes) == 0 {
		// skip metadata parsing if the file is empty
		return nil, nil, nil
	}

	meta := &Metadata{}

	err := json.Unmarshal(jsonBytes, meta)
	if err != nil {
		return nil, nil, err
	}

	// first get the ordered key information for each index,
	// then merge it with a set of options stored as a map
	metaAsMap := metaDataMapIndex{}
	err = json.Unmarshal(jsonBytes, &metaAsMap)
	if err != nil {
		return nil, nil, fmt.Errorf("error unmarshalling metadata as map: %v", err)
	}
	for i := range meta.Indexes {
		// remove "key" from the map so we can decode it properly later
		delete(metaAsMap.Indexes[i], "key")

		// parse extra index fields
		meta.Indexes[i].Options = metaAsMap.Indexes[i]
		if err := bsonutil.ConvertJSONDocumentToBSON(meta.Indexes[i].Options); err != nil {
			return nil, nil, fmt.Errorf("extended json error: %v", err)
		}

		// parse the values of the index keys, so we can support extended json
		for pos, field := range meta.Indexes[i].Key {
			meta.Indexes[i].Key[pos].Value, err = bsonutil.ParseJSONValue(field.Value)
			if err != nil {
				return nil, nil, fmt.Errorf("extended json in '%v' field: %v", field.Name, err)
			}
		}
	}

	// parse the values of options fields, to support extended json
	meta.Options, err = bsonutil.GetExtendedBsonD(meta.Options)
	if err != nil {
		return nil, nil, fmt.Errorf("extended json in 'options': %v", err)
	}

	return meta.Options, meta.Indexes, nil
}
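A hedged driver sketch for MetadataFromJSON: the function name, the already-constructed *MongoRestore parameter, and the metadata document below are all illustrative, the JSON only showing the {"options": ..., "indexes": [...]} shape that .metadata.json files use.

func exampleMetadataFromJSON(restore *MongoRestore) error {
	metadataJSON := []byte(`{
		"options": {},
		"indexes": [{"v": 1, "key": {"_id": 1}, "name": "_id_", "ns": "test.foo"}]
	}`)
	options, indexes, err := restore.MetadataFromJSON(metadataJSON)
	if err != nil {
		return err
	}
	fmt.Printf("collection options: %v, %d index(es)\n", options, len(indexes))
	return nil
}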
Example #5
// Dump handles some final options checking and executes MongoDump
func (dump *MongoDump) Dump() error {
	err := dump.ValidateOptions()
	if err != nil {
		return fmt.Errorf("Bad Option: %v", err)
	}

	if dump.InputOptions.Query != "" {
		// TODO: check for extended JSON support...
		// going to need to do some exploring later on, since I'm 95% sure
		// this is undefined in the current tools
		err = json.Unmarshal([]byte(dump.InputOptions.Query), &dump.query)
		if err != nil {
			return fmt.Errorf("error parsing query: %v", err)
		}
	}

	if dump.OutputOptions.Out == "-" {
		dump.useStdout = true
	}

	if dump.OutputOptions.DumpDBUsersAndRoles {
		//first make sure this is possible with the connected database
		dump.authVersion, err = auth.GetAuthVersion(dump.SessionProvider.GetSession())
		if err != nil {
			return fmt.Errorf("error getting auth schema version for dumpDbUsersAndRoles: %v", err)
		}
		log.Logf(2, "using auth schema version %v", dump.authVersion)
		if dump.authVersion != 3 {
			return fmt.Errorf("backing up users and roles is only supported for "+
				"deployments with auth schema versions 3, found: %v", dump.authVersion)
		}
	}

	//switch on what kind of execution to do
	switch {
	case dump.ToolOptions.DB == "" && dump.ToolOptions.Collection == "":
		err = dump.DumpEverything()
	case dump.ToolOptions.DB != "" && dump.ToolOptions.Collection == "":
		err = dump.DumpDatabase(dump.ToolOptions.DB)
	case dump.ToolOptions.DB != "" && dump.ToolOptions.Collection != "":
		err = dump.DumpCollection(dump.ToolOptions.DB, dump.ToolOptions.Collection)
	}

	if dump.OutputOptions.DumpDBUsersAndRoles {
		log.Logf(0, "dumping users and roles for %v", dump.ToolOptions.DB)
		if dump.ToolOptions.DB == "admin" {
			log.Logf(0, "skipping users/roles dump, already dumped admin database")
		} else {
			err = dump.DumpUsersAndRolesForDB(dump.ToolOptions.DB)
			if err != nil {
				return fmt.Errorf("error dumping users and roles: %v", err)
			}
		}
	}

	log.Logf(1, "done")

	return err
}
Example #6
// getSortFromArg takes a sort specification in JSON and returns it as a bson.D
// object which preserves the ordering of the keys as they appear in the input.
func getSortFromArg(queryRaw string) (bson.D, error) {
	parsedJSON := bson.D{}
	err := sloppyjson.Unmarshal([]byte(queryRaw), &parsedJSON)
	if err != nil {
		return nil, fmt.Errorf("Query is not valid JSON: %v", err)
	}
	return parsedJSON, nil
}
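A short usage sketch (the wrapper function and sort values are made up): because the result is a bson.D, a sort specification like the one below keeps its key order, which is what a sort document requires.

func exampleSortSpec() error {
	sortSpec, err := getSortFromArg(`{"age": -1, "name": 1}`)
	if err != nil {
		return err
	}
	fmt.Println(sortSpec) // keys come back in the order given: age, then name
	return nil
}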
Example #7
// getSortFromArg takes a sort specification in JSON and returns it as a bson.D
// object which preserves the ordering of the keys as they appear in the input.
func getSortFromArg(queryRaw string) (bson.D, error) {
	parsedJSON := bson.D{}
	err := json.Unmarshal([]byte(queryRaw), &parsedJSON)
	if err != nil {
		return nil, fmt.Errorf("query '%v' is not valid JSON: %v", queryRaw, err)
	}
	// TODO: verify sort specification before returning a nil error
	return parsedJSON, nil
}
Example #8
// Convert a json string to a raw document
func parseJson(jsonText string) (Document, error) {
	rawObj := Document{}
	err := json.Unmarshal([]byte(jsonText), &rawObj)

	if err != nil {
		return rawObj, err
	}
	err = normalizeObj(rawObj)
	return rawObj, err
}
Example #9
// getObjectFromByteArg takes an object in extended JSON, and converts it to an object that
// can be passed straight to db.collection.find(...) as a query or sort criteria.
// Returns an error if the string is not valid JSON, or extended JSON.
func getObjectFromByteArg(queryRaw []byte) (map[string]interface{}, error) {
	parsedJSON := map[string]interface{}{}
	err := json.Unmarshal(queryRaw, &parsedJSON)
	if err != nil {
		return nil, fmt.Errorf("query '%v' is not valid JSON: %v", queryRaw, err)
	}

	err = bsonutil.ConvertJSONDocumentToBSON(parsedJSON)
	if err != nil {
		return nil, err
	}
	return parsedJSON, nil
}
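A hypothetical caller (the function name and query value are not from the original) showing the extended-JSON case the doc comment refers to.

func exampleQueryArg() error {
	raw := []byte(`{"_id": {"$oid": "528fb35afb3a8030e2f643c3"}}`)
	query, err := getObjectFromByteArg(raw)
	if err != nil {
		return err
	}
	// After the extended-JSON conversion step, query can be passed straight
	// to a find call, as the comment above describes.
	fmt.Printf("%#v\n", query)
	return nil
}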
Example #10
File: mongofiles.go  Project: Machyne/mongo
// parse and convert extended JSON
func (mf *MongoFiles) parseID() (interface{}, error) {
	// parse the id using extended json
	var asJSON interface{}
	err := json.Unmarshal([]byte(mf.FileName), &asJSON)
	if err != nil {
		return nil, fmt.Errorf(
			"error parsing _id as json: %v; make sure you are properly escaping input", err)
	}
	id, err := bsonutil.ConvertJSONValueToBSON(asJSON)
	if err != nil {
		return nil, fmt.Errorf("error converting _id to bson: %v", err)
	}
	return id, nil
}
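A hypothetical call site: the ObjectId literal is reused from the Kerberos test above, and constructing MongoFiles with only FileName set is an assumption. parseID treats mf.FileName as JSON, so an _id given on the command line must be valid JSON or extended JSON.

func exampleParseID() error {
	mf := &MongoFiles{FileName: `{"$oid": "528fb35afb3a8030e2f643c3"}`}
	id, err := mf.parseID()
	if err != nil {
		return err
	}
	fmt.Printf("_id resolved to %v (%T)\n", id, id)
	return nil
}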
Example #11
// getObjectFromArg takes an object in extended JSON, and converts it to an object that
// can be passed straight to db.collection.find(...) as a query or sort criteria.
// Returns an error if the string is not valid JSON, or extended JSON.
func getObjectFromArg(queryRaw string) (map[string]interface{}, error) {
	parsedJSON := map[string]interface{}{}
	err := sloppyjson.Unmarshal([]byte(queryRaw), &parsedJSON)
	if err != nil {
		return nil, fmt.Errorf("Query is not valid JSON: %v", err)
	}

	for key, val := range parsedJSON {
		if valSubDoc, ok := val.(map[string]interface{}); ok {
			newVal, err := bsonutil.ParseSpecialKeys(valSubDoc)
			if err != nil {
				return nil, fmt.Errorf("Error in query: %v", err)
			}
			parsedJSON[key] = newVal
		}
	}
	return parsedJSON, nil
}
Example #12
func TestMongoDumpMetaData(t *testing.T) {
	testutil.VerifyTestType(t, testutil.IntegrationTestType)
	log.SetWriter(ioutil.Discard)

	Convey("With a MongoDump instance", t, func() {
		err := setUpMongoDumpTestData()
		So(err, ShouldBeNil)

		Convey("testing that the dumped directory contains information about indexes", func() {
			md := simpleMongoDumpInstance()
			md.OutputOptions.Out = "dump"
			err = md.Init()
			So(err, ShouldBeNil)

			err = md.Dump()
			So(err, ShouldBeNil)

			path, err := os.Getwd()
			So(err, ShouldBeNil)
			dumpDir := util.ToUniversalPath(filepath.Join(path, "dump"))
			dumpDBDir := util.ToUniversalPath(filepath.Join(dumpDir, testDB))
			So(fileDirExists(dumpDir), ShouldBeTrue)
			So(fileDirExists(dumpDBDir), ShouldBeTrue)

			Convey("having one metadata file per collection", func() {
				c1, err := countNonIndexBSONFiles(dumpDBDir)
				So(err, ShouldBeNil)

				c2, err := countMetaDataFiles(dumpDBDir)
				So(err, ShouldBeNil)

				So(c1, ShouldEqual, c2)

				Convey("and that the JSON in a metadata file is valid", func() {
					metaFiles, err := getMatchingFiles(dumpDBDir, ".*\\.metadata\\.json")
					So(err, ShouldBeNil)
					So(len(metaFiles), ShouldBeGreaterThan, 0)

					oneMetaFile, err := os.Open(util.ToUniversalPath(filepath.Join(dumpDBDir, metaFiles[0])))
					So(err, ShouldBeNil)
					contents, err := ioutil.ReadAll(oneMetaFile)
					So(err, ShouldBeNil)
					var jsonResult map[string]interface{}
					err = json.Unmarshal(contents, &jsonResult)
					So(err, ShouldBeNil)

					Convey("and contains an 'indexes' key", func() {
						_, ok := jsonResult["indexes"]
						So(ok, ShouldBeTrue)
						So(oneMetaFile.Close(), ShouldBeNil)
					})

				})

			})

			Reset(func() {
				So(os.RemoveAll(dumpDir), ShouldBeNil)
			})
		})

		Reset(func() {
			So(tearDownMongoDumpTestData(), ShouldBeNil)
		})

	})
}
Example #13
// Dump handles some final options checking and executes MongoDump.
func (dump *MongoDump) Dump() (err error) {
	defer dump.sessionProvider.Close()

	dump.shutdownIntentsNotifier = newNotifier()

	if dump.InputOptions.HasQuery() {
		// parse JSON then convert extended JSON values
		var asJSON interface{}
		content, err := dump.InputOptions.GetQuery()
		if err != nil {
			return err
		}
		err = json.Unmarshal(content, &asJSON)
		if err != nil {
			return fmt.Errorf("error parsing query as json: %v", err)
		}
		convertedJSON, err := bsonutil.ConvertJSONValueToBSON(asJSON)
		if err != nil {
			return fmt.Errorf("error converting query to bson: %v", err)
		}
		asMap, ok := convertedJSON.(map[string]interface{})
		if !ok {
			// unlikely to be reached
			return fmt.Errorf("query is not in proper format")
		}
		dump.query = bson.M(asMap)
	}

	if dump.OutputOptions.DumpDBUsersAndRoles {
		// first make sure this is possible with the connected database
		dump.authVersion, err = auth.GetAuthVersion(dump.sessionProvider)
		if err == nil {
			err = auth.VerifySystemAuthVersion(dump.sessionProvider)
		}
		if err != nil {
			return fmt.Errorf("error getting auth schema version for dumpDbUsersAndRoles: %v", err)
		}
		log.Logvf(log.DebugLow, "using auth schema version %v", dump.authVersion)
		if dump.authVersion < 3 {
			return fmt.Errorf("backing up users and roles is only supported for "+
				"deployments with auth schema versions >= 3, found: %v", dump.authVersion)
		}
	}

	if dump.OutputOptions.Archive != "" {
		//getArchiveOut gives us a WriteCloser to which we should write the archive
		var archiveOut io.WriteCloser
		archiveOut, err = dump.getArchiveOut()
		if err != nil {
			return err
		}
		dump.archive = &archive.Writer{
			// The archive.Writer needs its own copy of archiveOut because things
			// like the prelude are not written by the multiplexer.
			Out: archiveOut,
			Mux: archive.NewMultiplexer(archiveOut, dump.shutdownIntentsNotifier),
		}
		go dump.archive.Mux.Run()
		defer func() {
			// The Mux runs until its Control is closed
			close(dump.archive.Mux.Control)
			muxErr := <-dump.archive.Mux.Completed
			archiveOut.Close()
			if muxErr != nil {
				if err != nil {
					err = fmt.Errorf("archive writer: %v / %v", err, muxErr)
				} else {
					err = fmt.Errorf("archive writer: %v", muxErr)
				}
				log.Logvf(log.DebugLow, "%v", err)
			} else {
				log.Logvf(log.DebugLow, "mux completed successfully")
			}
		}()
	}

	// switch on what kind of execution to do
	switch {
	case dump.ToolOptions.DB == "" && dump.ToolOptions.Collection == "":
		err = dump.CreateAllIntents()
	case dump.ToolOptions.DB != "" && dump.ToolOptions.Collection == "":
		err = dump.CreateIntentsForDatabase(dump.ToolOptions.DB)
	case dump.ToolOptions.DB != "" && dump.ToolOptions.Collection != "":
		err = dump.CreateCollectionIntent(dump.ToolOptions.DB, dump.ToolOptions.Collection)
	}
	if err != nil {
		return err
	}

	if dump.OutputOptions.Oplog {
		err = dump.CreateOplogIntents()
		if err != nil {
			return err
		}
	}

	if dump.OutputOptions.DumpDBUsersAndRoles && dump.ToolOptions.DB != "admin" {
		err = dump.CreateUsersRolesVersionIntentsForDB(dump.ToolOptions.DB)
		if err != nil {
			return err
		}
	}

	// verify we can use repair cursors
	if dump.OutputOptions.Repair {
		log.Logv(log.DebugLow, "verifying that the connected server supports repairCursor")
		if dump.isMongos {
			return fmt.Errorf("cannot use --repair on mongos")
		}
		exampleIntent := dump.manager.Peek()
		if exampleIntent != nil {
			supported, err := dump.sessionProvider.SupportsRepairCursor(
				exampleIntent.DB, exampleIntent.C)
			if !supported {
				return err // no extra context needed
			}
		}
	}

	// IO Phase I
	// metadata, users, roles, and versions

	// TODO, either remove this debug or improve the language
	log.Logvf(log.DebugHigh, "dump phase I: metadata, indexes, users, roles, version")

	err = dump.DumpMetadata()
	if err != nil {
		return fmt.Errorf("error dumping metadata: %v", err)
	}

	if dump.OutputOptions.Archive != "" {
		session, err := dump.sessionProvider.GetSession()
		if err != nil {
			return err
		}
		defer session.Close()
		buildInfo, err := session.BuildInfo()
		var serverVersion string
		if err != nil {
			log.Logvf(log.Always, "warning, couldn't get version information from server: %v", err)
			serverVersion = "unknown"
		} else {
			serverVersion = buildInfo.Version
		}
		dump.archive.Prelude, err = archive.NewPrelude(dump.manager, dump.OutputOptions.NumParallelCollections, serverVersion)
		if err != nil {
			return fmt.Errorf("creating archive prelude: %v", err)
		}
		err = dump.archive.Prelude.Write(dump.archive.Out)
		if err != nil {
			return fmt.Errorf("error writing metadata into archive: %v", err)
		}
	}

	err = dump.DumpSystemIndexes()
	if err != nil {
		return fmt.Errorf("error dumping system indexes: %v", err)
	}

	if dump.ToolOptions.DB == "admin" || dump.ToolOptions.DB == "" {
		err = dump.DumpUsersAndRoles()
		if err != nil {
			return fmt.Errorf("error dumping users and roles: %v", err)
		}
	}
	if dump.OutputOptions.DumpDBUsersAndRoles {
		log.Logvf(log.Always, "dumping users and roles for %v", dump.ToolOptions.DB)
		if dump.ToolOptions.DB == "admin" {
			log.Logvf(log.Always, "skipping users/roles dump, already dumped admin database")
		} else {
			err = dump.DumpUsersAndRolesForDB(dump.ToolOptions.DB)
			if err != nil {
				return fmt.Errorf("error dumping users and roles for db: %v", err)
			}
		}
	}

	// If oplog capturing is enabled, we first check the most recent
	// oplog entry and save its timestamp, this will let us later
	// copy all oplog entries that occurred while dumping, creating
	// what is effectively a point-in-time snapshot.
	if dump.OutputOptions.Oplog {
		err := dump.determineOplogCollectionName()
		if err != nil {
			return fmt.Errorf("error finding oplog: %v", err)
		}
		log.Logvf(log.Info, "getting most recent oplog timestamp")
		dump.oplogStart, err = dump.getOplogStartTime()
		if err != nil {
			return fmt.Errorf("error getting oplog start: %v", err)
		}
	}

	if failpoint.Enabled(failpoint.PauseBeforeDumping) {
		time.Sleep(15 * time.Second)
	}

	// IO Phase II
	// regular collections

	// TODO, either remove this debug or improve the language
	log.Logvf(log.DebugHigh, "dump phase II: regular collections")

	// begin dumping intents
	if err := dump.DumpIntents(); err != nil {
		return err
	}

	// IO Phase III
	// oplog

	// TODO, either remove this debug or improve the language
	log.Logvf(log.DebugLow, "dump phase III: the oplog")

	// If we are capturing the oplog, we dump all oplog entries that occurred
	// while dumping the database. Before and after dumping the oplog,
	// we check to see if the oplog has rolled over (i.e. the most recent entry when
	// we started still exists, so we know we haven't lost data)
	if dump.OutputOptions.Oplog {
		log.Logvf(log.DebugLow, "checking if oplog entry %v still exists", dump.oplogStart)
		exists, err := dump.checkOplogTimestampExists(dump.oplogStart)
		if !exists {
			return fmt.Errorf(
				"oplog overflow: mongodump was unable to capture all new oplog entries during execution")
		}
		if err != nil {
			return fmt.Errorf("unable to check oplog for overflow: %v", err)
		}
		log.Logvf(log.DebugHigh, "oplog entry %v still exists", dump.oplogStart)

		log.Logvf(log.Always, "writing captured oplog to %v", dump.manager.Oplog().Location)
		err = dump.DumpOplogAfterTimestamp(dump.oplogStart)
		if err != nil {
			return fmt.Errorf("error dumping oplog: %v", err)
		}

		// check the oplog for a rollover one last time, to avoid a race condition
		// wherein the oplog rolls over in the time after our first check, but before
		// we copy it.
		log.Logvf(log.DebugLow, "checking again if oplog entry %v still exists", dump.oplogStart)
		exists, err = dump.checkOplogTimestampExists(dump.oplogStart)
		if !exists {
			return fmt.Errorf(
				"oplog overflow: mongodump was unable to capture all new oplog entries during execution")
		}
		if err != nil {
			return fmt.Errorf("unable to check oplog for overflow: %v", err)
		}
		log.Logvf(log.DebugHigh, "oplog entry %v still exists", dump.oplogStart)
	}

	log.Logvf(log.DebugLow, "finishing dump")

	return err
}
Example #14
// constructWCObject takes in a write concern and attempts to construct an
// mgo.Safe object from it. It returns an error if it is unable to parse the
// string or if a parsed write concern field value is invalid.
func constructWCObject(writeConcern string) (sessionSafety *mgo.Safe, err error) {
	sessionSafety = &mgo.Safe{}
	defer func() {
		// If the user passes a w value of 0, we set the session to use the
		// unacknowledged write concern but only if journal commit acknowledgment,
		// is not required. If commit acknowledgment is required, it prevails,
		// and the server will require that mongod acknowledge the write operation
		if sessionSafety.WMode == "" && sessionSafety.W == 0 && !sessionSafety.J {
			sessionSafety = nil
		}
	}()
	jsonWriteConcern := map[string]interface{}{}

	if err = json.Unmarshal([]byte(writeConcern), &jsonWriteConcern); err != nil {
		// if the writeConcern string can not be unmarshaled into JSON, this
		// allows a default to the old behavior wherein the entire argument
		// passed in is assigned to the 'w' field - thus allowing users to pass
		// a write concern that looks like: "majority", 0, "4", etc.
		wValue, err := strconv.Atoi(writeConcern)
		if err != nil {
			sessionSafety.WMode = writeConcern
		} else {
			sessionSafety.W = wValue
			if wValue < 0 {
				return sessionSafety, fmt.Errorf("invalid '%v' argument: %v", w, wValue)
			}
		}
		return sessionSafety, nil
	}

	if jVal, ok := jsonWriteConcern[j]; ok && util.IsTruthy(jVal) {
		sessionSafety.J = true
	}

	if fsyncVal, ok := jsonWriteConcern[fSync]; ok && util.IsTruthy(fsyncVal) {
		sessionSafety.FSync = true
	}

	if wtimeout, ok := jsonWriteConcern[wTimeout]; ok {
		wtimeoutValue, err := util.ToInt(wtimeout)
		if err != nil {
			return sessionSafety, fmt.Errorf("invalid '%v' argument: %v", wTimeout, wtimeout)
		}
		sessionSafety.WTimeout = wtimeoutValue
	}

	if wInterface, ok := jsonWriteConcern[w]; ok {
		wValue, err := util.ToInt(wInterface)
		if err != nil {
			// if the argument is neither a string nor int, error out
			wStrVal, ok := wInterface.(string)
			if !ok {
				return sessionSafety, fmt.Errorf("invalid '%v' argument: %v", w, wInterface)
			}
			sessionSafety.WMode = wStrVal
		} else {
			sessionSafety.W = wValue
			if wValue < 0 {
				return sessionSafety, fmt.Errorf("invalid '%v' argument: %v", w, wValue)
			}
		}
	}
	return sessionSafety, nil
}
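A usage sketch with made-up write-concern strings, covering both parsing paths: a bare value that falls through to the 'w' field, and a JSON document that sets w, j, and wtimeout. It assumes the package constants w, j, and wTimeout hold those literal key names.

func exampleWriteConcern() error {
	// Bare form: the whole argument becomes the w mode string.
	safe, err := constructWCObject("majority")
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", safe)

	// JSON form: numeric w, journaled writes, and a 500ms timeout.
	safe, err = constructWCObject(`{"w": 2, "j": true, "wtimeout": 500}`)
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", safe)
	return nil
}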